diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69e8c0b2b9..35170b8053 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -196,6 +196,22 @@ jobs: retention-days: 3 + build-example: + name: Build Grafana Dashboard Example + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v3 + with: + go-version-file: 'go.mod' + - name: Setup build environment + run: | + go install github.com/goreleaser/nfpm/v2/cmd/nfpm@${{ env.NFPM_VERSION }} + - name: Build Grafana example + run: make build-grafana-example + - name: Clean Grafana example + run: make clean-grafana-example + trigger-release-workflow: if: ${{ startsWith(github.ref_name, 'release-') && !github.event.pull_request.head.repo.fork }} needs: [ lint, unit-test, component-test, performance-test ] diff --git a/Makefile b/Makefile index c10cde4119..02d2af4efc 100644 --- a/Makefile +++ b/Makefile @@ -33,11 +33,20 @@ CERTS_DIR := ./build/certs PACKAGE_PREFIX := nginx-agent PACKAGES_REPO := "pkgs.nginx.com" OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') -OSARCH := $(shell uname -m) +# override this value if you want to change the architecture. GOARCH options here: https://gist.github.com/asukakenji/f15ba7e588ac42795f421b48b8aede63 +uname_m := $(shell uname -m) +ifeq ($(uname_m),aarch64) + OSARCH = arm64 +else + ifeq ($(uname_m),x86_64) + OSARCH = amd64 + else + OSARCH = $(uname_m) + endif +endif + TEST_BUILD_DIR := build/test PACKAGE_NAME := "${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}" -# override this value if you want to change the architecture. GOOS options here: https://gist.github.com/asukakenji/f15ba7e588ac42795f421b48b8aede63 -LOCAL_ARCH := amd64 CERT_CLIENT_CA_CN := client-ca.local CERT_CLIENT_INT_CN := client-int.local @@ -112,19 +121,19 @@ launch-swagger-ui: generate-swagger ## Launch Swagger UI # Local Packaging # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # local-apk-package: ## Create local apk package - GOWORK=off CGO_ENABLED=0 GOARCH=${LOCAL_ARCH} GOOS=linux go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent - ARCH=${LOCAL_ARCH} VERSION=$(shell echo ${VERSION} | tr -d 'v') nfpm pkg --config ./scripts/.local-nfpm.yaml --packager apk --target ./build/${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}.apk; + GOWORK=off CGO_ENABLED=0 GOARCH=${OSARCH} GOOS=linux go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent + ARCH=${OSARCH} VERSION=$(shell echo ${VERSION} | tr -d 'v') go run github.com/goreleaser/nfpm/v2/cmd/nfpm pkg --config ./scripts/.local-nfpm.yaml --packager apk --target ./build/${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}.apk; local-deb-package: ## Create local deb package - GOWORK=off CGO_ENABLED=0 GOARCH=${LOCAL_ARCH} GOOS=linux go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent - ARCH=${LOCAL_ARCH} VERSION=$(shell echo ${VERSION} | tr -d 'v') nfpm pkg --config ./scripts/.local-nfpm.yaml --packager deb --target ./build/${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}.deb; + GOWORK=off CGO_ENABLED=0 GOARCH=${OSARCH} GOOS=linux go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent + ARCH=${OSARCH} VERSION=$(shell echo ${VERSION} | tr -d 'v') go run github.com/goreleaser/nfpm/v2/cmd/nfpm pkg --config ./scripts/.local-nfpm.yaml --packager deb --target ./build/${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}.deb;
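Note on the Makefile hunk above: the hard-coded `LOCAL_ARCH := amd64` is replaced by an `OSARCH` default derived from `uname -m`, so local packages now build for the host's own architecture. A minimal sketch of the resulting behaviour, assuming GNU make (command-line variables take precedence over makefile assignments, so the "override this value" escape hatch mentioned in the comment still works):

```
# aarch64 host: uname -m prints "aarch64" -> the ifeq chain sets OSARCH=arm64
# x86_64 host:  uname -m prints "x86_64"  -> OSARCH=amd64
# anything else falls through unchanged   -> OSARCH=$(uname -m)
uname -m

# cross-building is still a one-liner, e.g. an amd64 package from an ARM host:
make local-deb-package OSARCH=amd64
```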
local-rpm-package: ## Create local rpm package - GOWORK=off CGO_ENABLED=0 GOARCH=${LOCAL_ARCH} GOOS=linux go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent - ARCH=${LOCAL_ARCH} VERSION=$(shell echo ${VERSION} | tr -d 'v') nfpm pkg --config ./scripts/.local-nfpm.yaml --packager rpm --target ./build/${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}.rpm; + GOWORK=off CGO_ENABLED=0 GOARCH=${OSARCH} GOOS=linux go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent + ARCH=${OSARCH} VERSION=$(shell echo ${VERSION} | tr -d 'v') go run github.com/goreleaser/nfpm/v2/cmd/nfpm pkg --config ./scripts/.local-nfpm.yaml --packager rpm --target ./build/${PACKAGE_PREFIX}-$(shell echo ${VERSION} | tr -d 'v')-SNAPSHOT-${COMMIT}.rpm; local-txz-package: ## Create local txz package - GOWORK=off CGO_ENABLED=0 GOARCH=${LOCAL_ARCH} GOOS=freebsd go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent + GOWORK=off CGO_ENABLED=0 GOARCH=${OSARCH} GOOS=freebsd go build -ldflags=${DEBUG_LDFLAGS} -o ./build/nginx-agent $(CONTAINER_CLITOOL) run -v ${PWD}:/nginx-agent/$(CONTAINER_VOLUME_FLAGS) build-local-packager:1.0.0 txz-packager-image: ## Builds txz packager container image @@ -139,7 +148,7 @@ include Makefile.packaging generate-mocks: ## Regenerate all needed mocks; to add new mock generation, add //go:generate mockgen to the file from which mocks should be generated GOWORK=off go generate ./... -test: unit-test performance-test component-test ## Run all tests +test: unit-test performance-test component-test integration-test ## Run all tests $(TEST_BUILD_DIR): mkdir -p $(TEST_BUILD_DIR) @@ -180,8 +189,8 @@ performance-test: ## Run performance tests $(CONTAINER_CLITOOL) run -v ${PWD}:/home/nginx/$(CONTAINER_VOLUME_FLAGS) --rm nginx-agent-benchmark:1.0.0 integration-test: local-deb-package - PACKAGE_NAME=${PACKAGE_NAME} BASE_IMAGE=${BASE_IMAGE} go test ./test/integration/install - PACKAGE_NAME=${PACKAGE_NAME} BASE_IMAGE=${BASE_IMAGE} go test ./test/integration/api + PACKAGE_NAME=${PACKAGE_NAME} BASE_IMAGE=${BASE_IMAGE} go test -v ./test/integration/install + PACKAGE_NAME=${PACKAGE_NAME} BASE_IMAGE=${BASE_IMAGE} go test -v ./test/integration/api test-bench: ## Run benchmark tests cd test/performance && GOWORK=off CGO_ENABLED=0 go test -mod=vendor -count 5 -timeout 2m -bench=.
-benchmem metrics_test.go @@ -244,5 +253,13 @@ run-container: ## Run container from specified IMAGE_TAG $(CONTAINER_CLITOOL) run ${IMAGE_TAG} # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # -# Dashboard Targets # +# Grafana Example Dashboard Targets # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # +clean-grafana-example: clean ## Clean example packages and docker + cd ./examples/grafana-metrics/ && BASE_IMAGE= PACKAGE_NAME= ${CONTAINER_COMPOSE} down + +build-grafana-example: local-deb-package ## Build the example of nginx-agent + cd ./examples/grafana-metrics/ && BASE_IMAGE=${BASE_IMAGE} PACKAGE_NAME=${PACKAGE_NAME} ${CONTAINER_COMPOSE} build + +run-grafana-example: ## Start the example of nginx-agent + cd ./examples/grafana-metrics/ && BASE_IMAGE=${BASE_IMAGE} PACKAGE_NAME=${PACKAGE_NAME} ${CONTAINER_COMPOSE} up diff --git a/examples/grafana-metrics/Dockerfile b/examples/grafana-metrics/Dockerfile deleted file mode 100644 index eaf8c2f1b4..0000000000 --- a/examples/grafana-metrics/Dockerfile +++ /dev/null @@ -1,44 +0,0 @@ -FROM ubuntu:22.04 as install -LABEL maintainer="NGINX Agent Maintainers " - -WORKDIR /agent -COPY ./build/nginx-agent.deb /agent/nginx-agent.deb -COPY ./entrypoint.sh /agent/entrypoint.sh -COPY ./nginx-agent.conf /agent/nginx-agent.conf -COPY ./nginx.conf /agent/nginx.conf - -RUN set -x \ - && addgroup --system --gid 101 nginx \ - && adduser --system --disabled-login --ingroup nginx --no-create-home --home /nonexistent --gecos "nginx user" --shell /bin/false --uid 101 nginx \ - && apt-get update \ - && apt-get install --no-install-recommends --no-install-suggests -y \ - ca-certificates \ - gnupg1 \ - lsb-release \ - git \ - wget \ - make \ - curl \ - vim \ - && apt-get update \ - && apt-get install -y build-essential libpcre3 libpcre3-dev zlib1g zlib1g-dev libssl-dev \ - && wget http://nginx.org/download/nginx-1.23.2.tar.gz \ - && tar xfz nginx-1.23.2.tar.gz \ - && cd nginx-1.23.2 \ - && cp /agent/nginx.conf ./conf/nginx.conf \ - && ./configure --with-http_stub_status_module \ - && make \ - && make install \ - && apt-get install -y -f /agent/nginx-agent.deb - -# run the nginx and agent -FROM install as runtime - -COPY --from=install /agent/entrypoint.sh /agent/entrypoint.sh -COPY --from=install /agent/nginx-agent.conf /etc/nginx-agent/nginx-agent.conf - -RUN chmod +x /agent/entrypoint.sh -STOPSIGNAL SIGTERM -EXPOSE 8080 - -ENTRYPOINT ["/agent/entrypoint.sh"] diff --git a/examples/grafana-metrics/Makefile b/examples/grafana-metrics/Makefile deleted file mode 100644 index bc746e125f..0000000000 --- a/examples/grafana-metrics/Makefile +++ /dev/null @@ -1,15 +0,0 @@ - -help: ## Show help message - @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\033[36m\033[0m\n"} /^[$$()% 0-9a-zA-Z_-]+:.*?##/ { printf " \033[36m%-24s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) - -clean: ## Remove docker containers and agent package - docker-compose down - rm -rf ./build - -build: ## Build agent package - mkdir ./build - cd ../../ && GOWORK=off CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -o ./build/nginx-agent - cd ../../ && nfpm pkg --config ./scripts/.local-nfpm.yaml --packager deb --target ./examples/grafana-metrics/build/nginx-agent.deb - -run: build ## Start docker containers - docker-compose up --build diff --git a/examples/grafana-metrics/README.md b/examples/grafana-metrics/README.md index 
66766d1ebf..63ee62f33e 100644 --- a/examples/grafana-metrics/README.md +++ b/examples/grafana-metrics/README.md @@ -3,10 +3,10 @@ This README will give instructions on how to run the demonstration on how the NG
-## Run Example -Run the following command to start up the agent grafana metrics example. This will create all the servies listed in [List of Services](#list-of-services). +## Run Grafana Example +Run the following command from the root directory to start up the agent Grafana metrics example. This will create all the services listed in [List of Services](#list-of-services). ``` -make clean build run +make clean-grafana-example build-grafana-example run-grafana-example ```
@@ -16,7 +16,7 @@ The services run by this example are listed below |-------------|-------| | Grafana | 3000 | | NGINX | 8080 | -| Grafana | 8081 | +| AGENT API | 8081 | | Prometheus | 9090 |
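Once the example is up, each service in the table above can be smoke-tested from the host. These endpoints are assumptions based on common defaults for these services; the agent API path in particular is set by nginx-agent.conf, not by this diff:

```
curl -s http://localhost:3000/login    # Grafana UI
curl -s http://localhost:8080/         # NGINX demo site
curl -s http://localhost:8081/metrics  # agent REST API (exact path depends on nginx-agent.conf)
curl -s http://localhost:9090/-/ready  # Prometheus readiness endpoint
```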
@@ -42,7 +42,7 @@ If you monitor the dashboard you'll see the graphs being populated. ## Teardown Example -Run the following command in the grafana directory to shut down example services. +Run the following command from the root directory to shut down example services. +``` +make clean-grafana-example ``` -make clean -``` \ No newline at end of file diff --git a/examples/grafana-metrics/docker-compose.yml b/examples/grafana-metrics/docker-compose.yml index 3f44275444..ee36bb9237 100644 --- a/examples/grafana-metrics/docker-compose.yml +++ b/examples/grafana-metrics/docker-compose.yml @@ -9,9 +9,17 @@ volumes: services: agent: - build: ./ - image: nginx/agent-example - container_name: agent + restart: unless-stopped + build: + context: ../../ + dockerfile: ./scripts/docker/nginx-oss/ubuntu/Dockerfile + args: + PACKAGE_NAME: ${PACKAGE_NAME} + BASE_IMAGE: ${BASE_IMAGE} + ENTRY_POINT: "./scripts/docker/nginx-oss/entrypoint.sh" + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf + - ./nginx-agent.conf:/etc/nginx-agent/nginx-agent.conf ports: - 8080:8080 - 8081:8081 @@ -36,6 +44,7 @@ services: - monitoring grafana: image: grafana/grafana-oss:latest + restart: unless-stopped volumes: - ./grafana-datasources.yml:/etc/grafana/provisioning/datasources/grafana-datasources.yml - ./grafana-dashboards.yml:/etc/grafana/provisioning/dashboards/grafana-dashboards.yml diff --git a/examples/grafana-metrics/entrypoint.sh b/examples/grafana-metrics/entrypoint.sh deleted file mode 100644 index 86f02532f0..0000000000 --- a/examples/grafana-metrics/entrypoint.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash - -set -e -set -x -set -euxo pipefail - -handle_term() -{ - echo "received TERM signal" - echo "stopping nginx-agent ..." - kill -TERM "${agent_pid}" 2>/dev/null - echo "stopping nginx ..." - kill -TERM "${nginx_pid}" 2>/dev/null -} - -trap 'handle_term' TERM - -# Launch nginx -echo "starting nginx ..." -/usr/local/nginx/sbin/nginx -g "daemon off;" & - -nginx_pid=$! - -cp /agent/nginx-agent.conf /etc/nginx-agent/nginx-agent.conf -cat /etc/nginx-agent/nginx-agent.conf - -# start nginx-agent, pass args -echo "starting nginx-agent ..." -nginx-agent "$@" & - -agent_pid=$! - -if [ $? != 0 ]; then - echo "couldn't start the agent, please check the log file" - exit 1 -fi - -wait_term() -{ - wait ${agent_pid} - trap - TERM - kill -QUIT "${nginx_pid}" 2>/dev/null - echo "waiting for nginx to stop..." - wait ${nginx_pid} -} - -wait_term - -echo "nginx-agent process has stopped, exiting." 
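With the example-local Dockerfile and entrypoint.sh deleted, the compose file above now builds the agent container from the shared scripts/docker/nginx-oss/ubuntu/Dockerfile and bind-mounts the example's nginx.conf and nginx-agent.conf into it. A hedged sketch of driving it by hand, where the BASE_IMAGE and PACKAGE_NAME values are illustrative stand-ins for what `make run-grafana-example` derives from the top-level Makefile:

```
# run from examples/grafana-metrics/ after `make local-deb-package` has produced the .deb
BASE_IMAGE=ubuntu:22.04 \
PACKAGE_NAME=nginx-agent-2.0.0-SNAPSHOT-abc1234 \
docker-compose up    # or `docker compose`, whichever CONTAINER_COMPOSE points at
```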
diff --git a/examples/grafana-metrics/nginx.conf b/examples/grafana-metrics/nginx.conf index ffce79ff42..3fa4090e21 100644 --- a/examples/grafana-metrics/nginx.conf +++ b/examples/grafana-metrics/nginx.conf @@ -2,12 +2,7 @@ #user nobody; worker_processes 1; -#error_log logs/error.log; -#error_log logs/error.log notice; -#error_log logs/error.log info; - -#pid logs/nginx.pid; - +error_log /var/log/nginx/error.log; events { worker_connections 1024; @@ -23,27 +18,19 @@ http { '"$http_user_agent" "$http_x_forwarded_for" ' '"$bytes_sent" "$request_length" "$request_time" ' '"$gzip_ratio" $server_protocol '; - - access_log /usr/local/nginx/logs/access.log main; + + access_log /var/log/nginx/access.log main; sendfile on; - #tcp_nopush on; - #keepalive_timeout 0; keepalive_timeout 65; - #gzip on; - server { listen 8080; server_name localhost; - #charset koi8-r; - - #access_log logs/host.access.log main; - location / { - root /usr/local/nginx/html; + root /usr/share/nginx/html; index index.html index.htm; allow 127.0.0.1; } @@ -67,7 +54,7 @@ http { # error_page 500 502 503 504 /50x.html; location = /50x.html { - root /usr/local/nginx/html; + root /usr/share/nginx/html; } } } diff --git a/go.mod b/go.mod index 571ea80543..bee4a8107f 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/google/go-cmp v0.5.9 github.com/google/uuid v1.3.0 github.com/klauspost/cpuid/v2 v2.1.0 - github.com/maxbrunsfeld/counterfeiter/v6 v6.5.0 + github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1 github.com/mitchellh/mapstructure v1.5.0 github.com/mwitkow/go-proto-validators v0.3.2 github.com/nginxinc/nginx-plus-go-client v0.10.0 @@ -24,13 +24,13 @@ require ( github.com/sirupsen/logrus v1.9.0 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.14.0 + github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.8.1 github.com/trivago/grok v1.0.0 github.com/vardius/message-bus v1.1.5 go.uber.org/atomic v1.10.0 golang.org/x/sync v0.1.0 - google.golang.org/grpc v1.51.0 + google.golang.org/grpc v1.52.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect gopkg.in/mcuadros/go-syslog.v2 v2.3.0 @@ -38,42 +38,52 @@ require ( ) require ( - github.com/bufbuild/buf v1.12.0 + github.com/bufbuild/buf v1.14.0 github.com/go-resty/resty/v2 v2.7.0 github.com/go-swagger/go-swagger v0.30.4 + github.com/goreleaser/nfpm/v2 v2.26.0 github.com/nginx/agent/sdk/v2 v2.0.0-00010101000000-000000000000 github.com/prometheus/client_golang v1.13.0 github.com/pseudomuto/protoc-gen-doc v1.5.1 - github.com/rs/cors v1.8.2 + github.com/rs/cors v1.8.3 gopkg.in/yaml.v2 v2.4.0 ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/AlekSi/pointer v1.2.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver v1.4.2 // indirect + github.com/Masterminds/semver v1.5.0 // indirect github.com/Masterminds/semver/v3 v3.2.0 // indirect - github.com/Masterminds/sprig v2.15.0+incompatible // indirect + github.com/Masterminds/sprig v2.22.0+incompatible // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect - github.com/aokoli/goutils v1.0.1 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20210512092938-c05353c2d58c // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect 
github.com/beorn7/perks v1.0.1 // indirect - github.com/bufbuild/connect-go v1.4.1 // indirect - github.com/bufbuild/protocompile v0.1.0 // indirect + github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect + github.com/bufbuild/connect-go v1.5.0 // indirect + github.com/bufbuild/protocompile v0.2.0 // indirect + github.com/cavaliergopher/cpio v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/docker/cli v20.10.21+incompatible // indirect + github.com/docker/cli v20.10.23+incompatible // indirect github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.21+incompatible // indirect + github.com/docker/docker v23.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/emirpasic/gods v1.12.0 // indirect github.com/envoyproxy/protoc-gen-validate v0.3.0-java // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-chi/chi/v5 v5.0.8 // indirect + github.com/go-git/gcfg v1.5.0 // indirect + github.com/go-git/go-billy/v5 v5.3.1 // indirect + github.com/go-git/go-git/v5 v5.2.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/analysis v0.21.4 // indirect github.com/go-openapi/errors v0.20.3 // indirect @@ -86,21 +96,27 @@ require ( github.com/go-openapi/strfmt v0.21.3 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-openapi/validate v0.22.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect - github.com/gofrs/uuid v4.3.1+incompatible // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/go-containerregistry v0.12.1 // indirect - github.com/google/pprof v0.0.0-20221203041831-ce31453925ec // indirect + github.com/gofrs/uuid v4.4.0+incompatible // indirect + github.com/google/go-containerregistry v0.13.0 // indirect + github.com/google/pprof v0.0.0-20230131232505-5a9e8f65f08f // indirect + github.com/goreleaser/chglog v0.4.1 // indirect + github.com/goreleaser/fileglob v1.3.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.3.3 // indirect + github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.7.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 // indirect github.com/jessevdk/go-flags v1.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/compress v1.15.13 // indirect + github.com/kevinburke/ssh_config v1.1.0 // indirect + github.com/klauspost/compress v1.15.15 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect @@ -113,13 +129,16 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // 
indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/muesli/mango v0.1.0 // indirect + github.com/muesli/mango-cobra v1.2.0 // indirect + github.com/muesli/mango-pflag v0.1.0 // indirect + github.com/muesli/roff v0.1.0 // indirect github.com/nginxinc/nginx-go-crossplane v0.4.1 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc2 // indirect github.com/pascaldekloe/name v1.0.1 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.7.0 // indirect @@ -131,22 +150,25 @@ require ( github.com/pseudomuto/protokit v0.2.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sergi/go-diff v1.2.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect + github.com/subosito/gotenv v1.4.2 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.5.0 // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect github.com/trivago/tgo v1.0.7 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + github.com/xanzy/ssh-agent v0.3.1 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect + gitlab.com/digitalxero/go-conventional-commit v1.0.7 // indirect go.mongodb.org/mongo-driver v1.11.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // indirect + go.opentelemetry.io/otel v1.12.0 // indirect + go.opentelemetry.io/otel/sdk v1.12.0 // indirect + go.opentelemetry.io/otel/trace v1.12.0 // indirect go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.9.0 // indirect go.uber.org/zap v1.24.0 // indirect @@ -156,11 +178,11 @@ require ( golang.org/x/sys v0.4.0 // indirect golang.org/x/term v0.4.0 // indirect golang.org/x/text v0.6.0 // indirect - golang.org/x/time v0.1.0 // indirect golang.org/x/tools v0.5.0 // indirect - google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect + google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect gotest.tools/v3 v3.4.0 // indirect ) diff --git a/go.sum b/go.sum index 98b76b0afc..5c47c63e37 100644 --- a/go.sum +++ b/go.sum @@ -36,24 +36,34 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod 
h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= +github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= +github.com/ProtonMail/go-crypto v0.0.0-20210512092938-c05353c2d58c h1:bNpaLLv2Y4kslsdkdCwAYu8Bak1aGVtxwi8Z/wy4Yuo= +github.com/ProtonMail/go-crypto v0.0.0-20210512092938-c05353c2d58c/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/ProtonMail/go-mime v0.0.0-20190923161245-9b5a4261663a h1:W6RrgN/sTxg1msqzFFb+G80MFmpjMw61IU+slm+wln4= +github.com/ProtonMail/gopenpgp/v2 v2.2.2 h1:u2m7xt+CZWj88qK1UUNBoXeJCFJwJCZ/Ff4ymGoxEXs= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -61,8 +71,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alvaroloes/enumer v1.1.2 h1:5khqHB33TZy1GWCO/lZwcroBFh7u+0j40T83VUbfAMY= github.com/alvaroloes/enumer v1.1.2/go.mod h1:FxrjvuXoDAx9isTJrv4c+T410zFi0DtXIT0m65DJ+Wo= -github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= @@ -71,12 +83,19 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bufbuild/buf v1.12.0 h1:F5yU2VNaJv3PzfQu+qnUWyzn9KerhbIfV/dM961jJ70= -github.com/bufbuild/buf v1.12.0/go.mod h1:tgUhc+/7jgJPn6EbcS8oIFOiSRewkyO/9j+Acbby9SQ= -github.com/bufbuild/connect-go v1.4.1 h1:6usL3JGjKhxQpvDlizP7u8VfjAr1JkckcAUbrdcbgNY= -github.com/bufbuild/connect-go v1.4.1/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= -github.com/bufbuild/protocompile v0.1.0 h1:HjgJBI85hY/qmW5tw/66sNDZ7z0UDdVSi/5r40WHw4s= -github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/bufbuild/buf v1.14.0 h1:BSpRDgxC8jKwV+XJXyVuB0PM1na/qL80rNAgzb1PZnU= +github.com/bufbuild/buf v1.14.0/go.mod h1:Z5FtbEZtMdog6dGRZdCvIZTc6lPYaCz3AONEPmXNkyk= +github.com/bufbuild/connect-go v1.5.0 h1:IfbgbzzaaZvF+OM3SfxO2EjtvNJarNAz2DIRuuNjAgc= +github.com/bufbuild/connect-go v1.5.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= +github.com/bufbuild/protocompile v0.2.0 h1:BykKTiwLe/Z4WaYKI8qHbD0zCijHI/VhCG5I/MwTwHg= +github.com/bufbuild/protocompile v0.2.0/go.mod h1:tleDrpPTlLUVmgnEoN6qBliKWqJaZFJXqZdFjTd+ocU= +github.com/caarlos0/go-rpmutils v0.2.1-0.20211112020245-2cd62ff89b11 h1:IRrDwVlWQr6kS1U8/EtyA1+EHcc4yl8pndcqXWrEamg= +github.com/caarlos0/testfs v0.4.4 h1:3PHvzHi5Lt+g332CiShwS8ogTgS3HjrmzZxCm6JCDr8= +github.com/caarlos0/testfs v0.4.4/go.mod h1:bRN55zgG4XCUVVHZCeU+/Tz1Q6AxEJOEJTliBy+1DMk= +github.com/cavaliergopher/cpio v1.0.1 h1:KQFSeKmZhv0cr+kawA3a0xTQCU4QxXF1vhU7P7av2KM= +github.com/cavaliergopher/cpio v1.0.1/go.mod 
h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc= github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -98,18 +117,20 @@ github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU= -github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.23+incompatible h1:qwyha/T3rXk9lfuVcn533cKFc7n/6IzL5GXVAgMVPBg= +github.com/docker/cli v20.10.23+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= -github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE= +github.com/docker/docker v23.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -123,13 +144,25 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 
h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0= github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= +github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -141,6 +174,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -210,10 +248,12 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.3.1+incompatible 
h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= -github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -222,8 +262,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -261,15 +299,14 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8= -github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k= +github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k= +github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -286,8 +323,8 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof 
v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec h1:fR20TYVVwhK4O7r7y+McjRYyaTH6/vjwJOajE+XhlzM= -github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/pprof v0.0.0-20230131232505-5a9e8f65f08f h1:gl1DCiSk+mrXXBGPm6CEeS2MkJuMVzAOrXg34oVj1QI= +github.com/google/pprof v0.0.0-20230131232505-5a9e8f65f08f/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -296,6 +333,13 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/goreleaser/chglog v0.4.1 h1:6IAduyHpR58u3OFBdTrg8I/qAaBAmYYZ4Wq1Fz30QUY= +github.com/goreleaser/chglog v0.4.1/go.mod h1:85xT/GTwDCzLdjysP9aj+x6hQ+IxAv8SXx9MTrgcY2Y= +github.com/goreleaser/fileglob v1.3.0 h1:/X6J7U8lbDpQtBvGcwwPS6OpzkNVlVEsFUVRx9+k+7I= +github.com/goreleaser/fileglob v1.3.0/go.mod h1:Jx6BoXv3mbYkEzwm9THo7xbr5egkAraxkGorbJb4RxU= +github.com/goreleaser/nfpm/v2 v2.26.0 h1:nL7sXwsMLsc+NWE4Eddev+ZZomRaucT0WSnWkLwuxBM= +github.com/goreleaser/nfpm/v2 v2.26.0/go.mod h1:AQAOZ89rL4rHbv6ZdwyCWkjMam3LrHi9QIq56TnjUQk= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= @@ -307,9 +351,12 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= @@ -317,11 +364,16 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod 
h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.7.0 h1:2vgQcBz1n256N+FpX3Jq7Y17AjYt46Ig3zIWyy770So= +github.com/invopop/jsonschema v0.7.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84 h1:2uT3aivO7NVpUPGcQX7RbHijHMyWix/yCnIrCWc+5co= github.com/jdxcode/netrc v0.0.0-20221124155335-4616370d1a84/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= +github.com/jhump/protoreflect v1.14.1 h1:N88q7JkxTHWFEqReuTsYH1dPIwXxA0ITNQp7avLY10s= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= @@ -333,16 +385,20 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jstemmer/go-junit-report v1.0.0/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v1.1.0 h1:pH/t1WS9NzT8go394IqZeJTMHVm6Cr6ZJ6AQ+mdNo/o= +github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0= -github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod 
h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= @@ -372,11 +428,14 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/maxbrunsfeld/counterfeiter/v6 v6.5.0 h1:rBhB9Rls+yb8kA4x5a/cWxOufWfXt24E+kq4YlbGj3g= github.com/maxbrunsfeld/counterfeiter/v6 v6.5.0/go.mod h1:fJ0UAZc1fx3xZhU4eSHQDJ1ApFmTVhp5VTpV9tm2ogg= +github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1 h1:9XE5ykDiC8eNSqIPkxx0EsV3kMX1oe4kQWRZjIgytUA= +github.com/maxbrunsfeld/counterfeiter/v6 v6.6.1/go.mod h1:qbKwBR+qQODzH2WD/s53mdgp/xVcXMlJb59GRFOp6Z4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -399,6 +458,14 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/muesli/mango v0.1.0 h1:DZQK45d2gGbql1arsYA4vfg4d7I9Hfx5rX/GCmzsAvI= +github.com/muesli/mango v0.1.0/go.mod h1:5XFpbC8jY5UUv89YQciiXNlbi+iJgt29VDC5xbzrLL4= +github.com/muesli/mango-cobra v1.2.0 h1:DQvjzAM0PMZr85Iv9LIMaYISpTOliMEg+uMFtNbYvWg= +github.com/muesli/mango-cobra v1.2.0/go.mod h1:vMJL54QytZAJhCT13LPVDfkvCUJ5/4jNUKF/8NC2UjA= +github.com/muesli/mango-pflag v0.1.0 h1:UADqbYgpUyRoBja3g6LUL+3LErjpsOwaC9ywvBWe7Sg= +github.com/muesli/mango-pflag v0.1.0/go.mod h1:YEQomTxaCUp8PrbhFh10UfbhbQrM/xJ4i2PB8VTLLW0= +github.com/muesli/roff v0.1.0 h1:YD0lalCotmYuF5HhZliKWlIx7IEhiXeSfq7hNjFqGF8= +github.com/muesli/roff v0.1.0/go.mod h1:pjAHQM9hdUUwm/krAfrLGgJkXJ+YuhtsfZ42kieB2Ig= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.3.2 h1:qRlmpTzm2pstMKKzTdvwPCF5QfBNURSlAgN/R+qbKos= @@ -422,8 +489,8 @@ github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
 github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
@@ -435,10 +502,8 @@ github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1/go.mod h1:eD5Jxq
 github.com/pascaldekloe/name v1.0.1 h1:9lnXOHeqeHHnWLbKfH6X98+4+ETVqFqxN09UXSjcMb0=
 github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
-github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
-github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
-github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
+github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
+github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -488,12 +553,15 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
 github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
-github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
+github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo=
+github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
 github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
+github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
 github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shirou/gopsutil/v3 v3.22.7 h1:flKnuCMfUUrO+oAvwAd6GKZgnPzr098VA/UJ14nhJd4=
@@ -505,10 +573,13 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
 github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw=
-github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
+github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
 github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
@@ -520,8 +591,8 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU=
-github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -529,6 +600,7 @@ github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -537,8 +609,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
-github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
 github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
@@ -552,14 +624,20 @@ github.com/trivago/grok v1.0.0 h1:oV2ljyZT63tgXkmgEHg2U0jMqiKKuL0hkn49s6aRavQ=
 github.com/trivago/grok v1.0.0/go.mod h1:9t59xLInhrncYq9a3J7488NgiBZi5y5yC7bss+w4NHM=
 github.com/trivago/tgo v1.0.7 h1:uaWH/XIy9aWYWpjm2CU3RpcqZXmX2ysQ9/Go+d9gyrM=
 github.com/trivago/tgo v1.0.7/go.mod h1:w4dpD+3tzNIIiIfkWWa85w5/B77tlvdZckQ+6PkFnhc=
+github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
+github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/vardius/message-bus v1.1.5 h1:YSAC2WB4HRlwc4neFPTmT88kzzoiQ+9WRRbej/E/LZc=
 github.com/vardius/message-bus v1.1.5/go.mod h1:6xladCV2lMkUAE4bzzS85qKOiB5miV7aBVRafiTJGqw=
 github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+github.com/xanzy/ssh-agent v0.3.1 h1:AmzO1SSWxw73zxFZPRwaMN1MohDw8UyHnmuxyceTEGo=
+github.com/xanzy/ssh-agent v0.3.1/go.mod h1:QIE4lCeL7nkC25x+yA3LBIYfwCc1TFziCtG7cBAac6w=
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
 github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
+github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
 github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -570,6 +648,8 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+gitlab.com/digitalxero/go-conventional-commit v1.0.7 h1:8/dO6WWG+98PMhlZowt/YjuiKhqhGlOCwlIV8SqqGh8=
+gitlab.com/digitalxero/go-conventional-commit v1.0.7/go.mod h1:05Xc2BFsSyC5tKhK0y+P3bs0AwUtNuTp+mTpbCU/DZ0=
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
@@ -581,14 +661,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
-go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
-go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
-go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
-go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
-go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
-go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
+go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ=
+go.opentelemetry.io/otel v1.12.0/go.mod h1:geaoz0L0r1BEOR81k7/n9W4TCXYCJ7bPO7K374jQHG0=
+go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o=
+go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE=
+go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc=
+go.opentelemetry.io/otel/trace v1.12.0/go.mod h1:pHlgBynn6s25qJ2szD+Bv+iwKJttjHSI3lUAyf0GNuQ=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
 go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -601,6 +679,7 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
 go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -608,7 +687,9 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -685,7 +766,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -733,6 +813,7 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -748,6 +829,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -787,6 +869,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -823,7 +906,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -953,8 +1035,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
-google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -971,8 +1053,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -1006,6 +1088,8 @@ gopkg.in/mcuadros/go-syslog.v2 v2.3.0 h1:kcsiS+WsTKyIEPABJBJtoG0KkOS6yzvJ+/eZlhD
 gopkg.in/mcuadros/go-syslog.v2 v2.3.0/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/go.work.sum b/go.work.sum
index 8046fc5c53..b68d9557b6 100644
--- a/go.work.sum
+++ b/go.work.sum
@@ -1,125 +1,276 @@
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
 cloud.google.com/go/accessapproval v1.5.0 h1:/nTivgnV/n1CaAeo+ekGexTYUsKEU9jUVkoY5359+3Q=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
 cloud.google.com/go/accesscontextmanager v1.4.0 h1:CFhNhU7pcD11cuDkQdrE6PQJgv0EXNKNv06jIzbLlCU=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
 cloud.google.com/go/aiplatform v1.27.0 h1:DBi3Jk9XjCJ4pkkLM4NqKgj3ozUL1wq4l+d3/jTGXAI=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
 cloud.google.com/go/analytics v0.12.0 h1:NKw6PpQi6V1O+KsjuTd+bhip9d0REYu4NevC45vtGp8=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
 cloud.google.com/go/apigateway v1.4.0 h1:IIoXKR7FKrEAQhMTz5hK2wiDz2WNFHS7eVr/L1lE/rM=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
 cloud.google.com/go/apigeeconnect v1.4.0 h1:AONoTYJviyv1vS4IkvWzq69gEVdvHx35wKXc+e6wjZQ=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
 cloud.google.com/go/appengine v1.5.0 h1:lmG+O5oaR9xNwaRBwE2XoMhwQHsHql5IoiGr1ptdDwU=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
 cloud.google.com/go/area120 v0.6.0 h1:TCMhwWEWhCn8d44/Zs7UCICTWje9j3HuV6nVGMjdpYw=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
 cloud.google.com/go/artifactregistry v1.9.0 h1:3d0LRAU1K6vfqCahhl9fx2oGHcq+s5gftdix4v8Ibrc=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
 cloud.google.com/go/asset v1.10.0 h1:aCrlaLGJWTODJX4G56ZYzJefITKEWNfbjjtHSzWpxW0=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
 cloud.google.com/go/assuredworkloads v1.9.0 h1:hhIdCOowsT1GG5eMCIA0OwK6USRuYTou/1ZeNxCSRtA=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
 cloud.google.com/go/automl v1.8.0 h1:BMioyXSbg7d7xLibn47cs0elW6RT780IUWr42W8rp2Q=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
 cloud.google.com/go/baremetalsolution v0.4.0 h1:g9KO6SkakcYPcc/XjAzeuUrEOXlYPnMpuiaywYaGrmQ=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
 cloud.google.com/go/batch v0.4.0 h1:1jvEBY55OH4Sd2FxEXQfxGExFWov1A/IaRe+Z5Z71Fw=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
 cloud.google.com/go/beyondcorp v0.3.0 h1:w+4kThysgl0JiKshi2MKDCg2NZgOyqOI0wq2eBZyrzA=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
 cloud.google.com/go/bigquery v1.44.0 h1:Wi4dITi+cf9VYp4VH2T9O41w0kCW0uQTELq2Z6tukN0=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
 cloud.google.com/go/billing v1.7.0 h1:Xkii76HWELHwBtkQVZvqmSo9GTr0O+tIbRNnMcGdlg4=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
 cloud.google.com/go/binaryauthorization v1.4.0 h1:pL70vXWn9TitQYXBWTK2abHl2JHLwkFRjYw6VflRqEA=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
 cloud.google.com/go/certificatemanager v1.4.0 h1:tzbR4UHBbgsewMWUD93JHi8EBi/gHBoSAcY1/sThFGk=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
 cloud.google.com/go/channel v1.9.0 h1:pNuUlZx0Jb0Ts9P312bmNMuH5IiFWIR4RUtLb70Ke5s=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
 cloud.google.com/go/cloudbuild v1.4.0 h1:TAAmCmAlOJ4uNBu6zwAjwhyl/7fLHHxIEazVhr3QBbQ=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
 cloud.google.com/go/clouddms v1.4.0 h1:UhzHIlgFfMr6luVYVNydw/pl9/U5kgtjCMJHnSvoVws=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
 cloud.google.com/go/cloudtasks v1.8.0 h1:faUiUgXjW8yVZ7XMnKHKm1WE4OldPBUWWfIRN/3z1dc=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/contactcenterinsights v1.4.0 h1:tTQLI/ZvguUf9Hv+36BkG2+/PeC8Ol1q4pBW+tgCx0A=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
 cloud.google.com/go/container v1.7.0 h1:nbEK/59GyDRKKlo1SqpohY1TK8LmJ2XNcvS9Gyom2A0=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
 cloud.google.com/go/containeranalysis v0.6.0 h1:2824iym832ljKdVpCBnpqm5K94YT/uHTVhNF+dRTXPI=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
 cloud.google.com/go/datacatalog v1.8.0 h1:6kZ4RIOW/uT7QWC5SfPfq/G8sYzr/v+UOmOAxy4Z1TE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
 cloud.google.com/go/dataflow v0.7.0 h1:CW3541Fm7KPTyZjJdnX6NtaGXYFn5XbFC5UcjgALKvU=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
 cloud.google.com/go/dataform v0.5.0 h1:vLwowLF2ZB5J5gqiZCzv076lDI/Rd7zYQQFu5XO1PSg=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
 cloud.google.com/go/datafusion v1.5.0 h1:j5m2hjWovTZDTQak4MJeXAR9yN7O+zMfULnjGw/OOLg=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
 cloud.google.com/go/datalabeling v0.6.0 h1:dp8jOF21n/7jwgo/uuA0RN8hvLcKO4q6s/yvwevs2ZM=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
 cloud.google.com/go/dataplex v1.4.0 h1:cNxeA2DiWliQGi21kPRqnVeQ5xFhNoEjPRt1400Pm8Y=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
 cloud.google.com/go/dataproc v1.8.0 h1:gVOqNmElfa6n/ccG/QDlfurMWwrK3ezvy2b2eDoCmS0=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
 cloud.google.com/go/dataqna v0.6.0 h1:gx9jr41ytcA3dXkbbd409euEaWtofCVXYBvJz3iYm18=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
 cloud.google.com/go/datastore v1.10.0 h1:4siQRf4zTiAVt/oeH4GureGkApgb2vtPQAtOmhpqQwE=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
 cloud.google.com/go/datastream v1.5.0 h1:PgIgbhedBtYBU6POGXFMn2uSl9vpqubc3ewTNdcU8Mk=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
 cloud.google.com/go/deploy v1.5.0 h1:kI6dxt8Ml0is/x7YZjLveTvR7YPzXAUD/8wQZ2nH5zA=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
 cloud.google.com/go/dialogflow v1.19.0 h1:HYHVOkoxQ9bSfNIelSZYNAtUi4CeSrCnROyOsbOqPq8=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
 cloud.google.com/go/dlp v1.7.0 h1:9I4BYeJSVKoSKgjr70fLdRDumqcUeVmHV4fd5f9LR6Y=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
 cloud.google.com/go/documentai v1.10.0 h1:jfq09Fdjtnpnmt/MLyf6A3DM3ynb8B2na0K+vSXvpFM=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
 cloud.google.com/go/domains v0.7.0 h1:pu3JIgC1rswIqi5romW0JgNO6CTUydLYX8zyjiAvO1c=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
 cloud.google.com/go/edgecontainer v0.2.0 h1:hd6J2n5dBBRuAqnNUEsKWrp6XNPKsaxwwIyzOPZTokk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
 cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
 cloud.google.com/go/essentialcontacts v1.4.0 h1:b6csrQXCHKQmfo9h3dG/pHyoEh+fQG1Yg78a53LAviY=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
 cloud.google.com/go/eventarc v1.8.0 h1:AgCqrmMMIcel5WWKkzz5EkCUKC3Rl5LNMMYsS+LvsI0=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
 cloud.google.com/go/filestore v1.4.0 h1:yjKOpzvqtDmL5AXbKttLc8j0hL20kuC1qPdy5HPcxp0=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/firestore v1.9.0 h1:IBlRyxgGySXu5VuW0RgGFlTtLukSnNkpDiEOMkQkmpA=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
 cloud.google.com/go/functions v1.9.0 h1:35tgv1fQOtvKqH/uxJMzX3w6usneJ0zXpsFr9KAVhNE=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
 cloud.google.com/go/gaming v1.8.0 h1:97OAEQtDazAJD7yh/kvQdSCQuTKdR0O+qWAJBZJ4xiA=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
 cloud.google.com/go/gkebackup v0.3.0 h1:4K+jiv4ocqt1niN8q5Imd8imRoXBHTrdnJVt/uFFxF4=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
 cloud.google.com/go/gkeconnect v0.6.0 h1:zAcvDa04tTnGdu6TEZewaLN2tdMtUOJJ7fEceULjguA=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
 cloud.google.com/go/gkehub v0.10.0 h1:JTcTaYQRGsVm+qkah7WzHb6e9sf1C0laYdRPn9aN+vg=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
 cloud.google.com/go/gkemulticloud v0.4.0 h1:8F1NhJj8ucNj7lK51UZMtAjSWTgP1zO18XF6vkfiPPU=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/grafeas v0.2.0 h1:CYjC+xzdPvbV65gi6Dr4YowKcmLo045pm18L0DhdELM=
 cloud.google.com/go/gsuiteaddons v1.4.0 h1:TGT2oGmO5q3VH6SjcrlgPUWI0njhYv4kywLm6jag0to=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
 cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
 cloud.google.com/go/iap v1.5.0 h1:BGEXovwejOCt1zDk8hXq0bOhhRu9haXKWXXXp2B4wBM=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
 cloud.google.com/go/ids v1.2.0 h1:LncHK4HHucb5Du310X8XH9/ICtMwZ2PCfK0ScjWiJoY=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
 cloud.google.com/go/iot v1.4.0 h1:Y9+oZT9jD4GUZzORXTU45XsnQrhxmDT+TFbPil6pRVQ=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
 cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
 cloud.google.com/go/language v1.8.0 h1:3Wa+IUMamL4JH3Zd3cDZUHpwyqplTACt6UZKRD2eCL4=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
 cloud.google.com/go/lifesciences v0.6.0 h1:tIqhivE2LMVYkX0BLgG7xL64oNpDaFFI7teunglt1tI=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
 cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
 cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
 cloud.google.com/go/managedidentities v1.4.0 h1:3Kdajn6X25yWQFhFCErmKSYTSvkEd3chJROny//F1A0=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
 cloud.google.com/go/maps v0.1.0 h1:kLReRbclTgJefw2fcCbdLPLhPj0U6UUWN10ldG8sdOU=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
 cloud.google.com/go/mediatranslation v0.6.0 h1:qAJzpxmEX+SeND10Y/4868L5wfZpo4Y3BIEnIieP4dk=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
 cloud.google.com/go/memcache v1.7.0 h1:yLxUzJkZVSH2kPaHut7k+7sbIBFpvSh1LW9qjM2JDjA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
 cloud.google.com/go/metastore v1.8.0 h1:3KcShzqWdqxrDEXIBWpYJpOOrgpDj+HlBi07Grot49Y=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
 cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
 cloud.google.com/go/networkconnectivity v1.7.0 h1:BVdIKaI68bihnXGdCVL89Jsg9kq2kg+II30fjVqo62E=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
 cloud.google.com/go/networkmanagement v1.5.0 h1:mDHA3CDW00imTvC5RW6aMGsD1bH+FtKwZm/52BxaiMg=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
 cloud.google.com/go/networksecurity v0.6.0 h1:qDEX/3sipg9dS5JYsAY+YvgTjPR63cozzAWop8oZS94=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
 cloud.google.com/go/notebooks v1.5.0 h1:AC8RPjNvel3ExgXjO1YOAz+teg9+j+89TNxa7pIZfww=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
 cloud.google.com/go/optimization v1.2.0 h1:7PxOq9VTT7TMib/6dMoWpMvWS2E4dJEvtYzjvBreaec=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
 cloud.google.com/go/orchestration v1.4.0 h1:39d6tqvNjd/wsSub1Bn4cEmrYcet5Ur6xpaN+SxOxtY=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
 cloud.google.com/go/orgpolicy v1.5.0 h1:erF5PHqDZb6FeFrUHiYj2JK2BMhsk8CyAg4V4amJ3rE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
 cloud.google.com/go/osconfig v1.10.0 h1:NO0RouqCOM7M2S85Eal6urMSSipWwHU8evzwS+siqUI=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
 cloud.google.com/go/oslogin v1.7.0 h1:pKGDPfeZHDybtw48WsnVLjoIPMi9Kw62kUE5TXCLCN4=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
 cloud.google.com/go/phishingprotection v0.6.0 h1:OrwHLSRSZyaiOt3tnY33dsKSedxbMzsXvqB21okItNQ=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
 cloud.google.com/go/policytroubleshooter v1.4.0 h1:NQklJuOUoz1BPP+Epjw81COx7IISWslkZubz/1i0UN8=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
 cloud.google.com/go/privatecatalog v0.6.0 h1:Vz86uiHCtNGm1DeC32HeG2VXmOq5JRYA3VRPf8ZEcSg=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
 cloud.google.com/go/pubsub v1.27.1 h1:q+J/Nfr6Qx4RQeu3rJcnN48SNC0qzlYzSeqkPq93VHs=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
 cloud.google.com/go/pubsublite v1.5.0 h1:iqrD8vp3giTb7hI1q4TQQGj77cj8zzgmMPsTZtLnprM=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/recaptchaenterprise v1.3.1 h1:u6EznTGzIdsyOsvm+Xkw0aSuKFXQlyjGE9a4exk6iNQ=
 cloud.google.com/go/recaptchaenterprise/v2 v2.5.0 h1:UqzFfb/WvhwXGDF1eQtdHLrmni+iByZXY4h3w9Kdyv8=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
 cloud.google.com/go/recommendationengine v0.6.0 h1:6w+WxPf2LmUEqX0YyvfCoYb8aBYOcbIV25Vg6R0FLGw=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
 cloud.google.com/go/recommender v1.8.0 h1:9kMZQGeYfcOD/RtZfcNKGKtoex3DdoB4zRgYU/WaIwE=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
 cloud.google.com/go/redis v1.10.0 h1:/zTwwBKIAD2DEWTrXZp8WD9yD/gntReF/HkPssVYd0U=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
 cloud.google.com/go/resourcemanager v1.4.0 h1:NDao6CHMwEZIaNsdWy+tuvHaavNeGP06o1tgrR0kLvU=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
 cloud.google.com/go/resourcesettings v1.4.0 h1:eTzOwB13WrfF0kuzG2ZXCfB3TLunSHBur4s+HFU6uSM=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
 cloud.google.com/go/retail v1.11.0 h1:N9fa//ecFUOEPsW/6mJHfcapPV0wBSwIUwpVZB7MQ3o=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
 cloud.google.com/go/run v0.3.0 h1:AWPuzU7Xtaj3Jf+QarDWIs6AJ5hM1VFQ+F6Q+VZ6OT4=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
 cloud.google.com/go/scheduler v1.7.0 h1:K/mxOewgHGeKuATUJNGylT75Mhtjmx1TOkKukATqMT8=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
 cloud.google.com/go/secretmanager v1.9.0 h1:xE6uXljAC1kCR8iadt9+/blg1fvSbmenlsDN4fT9gqw=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
 cloud.google.com/go/security v1.10.0 h1:KSKzzJMyUoMRQzcz7azIgqAUqxo7rmQ5rYvimMhikqg=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
 cloud.google.com/go/securitycenter v1.16.0 h1:QTVtk/Reqnx2bVIZtJKm1+mpfmwRwymmNvlaFez7fQY=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
 cloud.google.com/go/servicecontrol v1.5.0 h1:ImIzbOu6y4jL6ob65I++QzvqgFaoAKgHOG+RU9/c4y8=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
 cloud.google.com/go/servicedirectory v1.7.0 h1:f7M8IMcVzO3T425AqlZbP3yLzeipsBHtRza8vVFYMhQ=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
 cloud.google.com/go/servicemanagement v1.5.0 h1:TpkCO5M7dhKSy1bKUD9o/sSEW/U1Gtx7opA1fsiMx0c=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
 cloud.google.com/go/serviceusage v1.4.0 h1:b0EwJxPJLpavSljMQh0RcdHsUrr5DQ+Nelt/3BAs5ro=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
 cloud.google.com/go/shell v1.4.0 h1:b1LFhFBgKsG252inyhtmsUUZwchqSz3WTvAIf3JFo4g=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
 cloud.google.com/go/spanner v1.41.0 h1:NvdTpRwf7DTegbfFdPjAWyD7bOVu0VeMqcvR9aCQCAc=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
 cloud.google.com/go/speech v1.9.0 h1:yK0ocnFH4Wsf0cMdUyndJQ/hPv02oTJOxzi6AgpBy4s=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
 cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
 cloud.google.com/go/storagetransfer v1.6.0 h1:fUe3OydbbvHcAYp07xY+2UpH4AermGbmnm7qdEj3tGE=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
 cloud.google.com/go/talent v1.4.0 h1:MrekAGxLqAeAol4Sc0allOVqUGO8j+Iim8NMvpiD7tM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
 cloud.google.com/go/texttospeech v1.5.0 h1:ccPiHgTewxgyAeCWgQWvZvrLmbfQSFABTMAfrSPLPyY=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
 cloud.google.com/go/tpu v1.4.0 h1:ztIdKoma1Xob2qm6QwNh4Xi9/e7N3IfvtwG5AcNsj1g=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
 cloud.google.com/go/trace v1.4.0 h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
 cloud.google.com/go/translate v1.4.0 h1:AOYOH3MspzJ/bH1YXzB+xTE8fMpn3mwhLjugwGXvMPI=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
 cloud.google.com/go/video v1.9.0 h1:ttlvO4J5c1VGq6FkHqWPD/aH6PfdxujHt+muTJlW1Zk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
 cloud.google.com/go/videointelligence v1.9.0 h1:RPFgVVXbI2b5vnrciZjtsUgpNKVtHO/WIyXUhEfuMhA=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/vision v1.2.0 h1:/CsSTkbmO9HC8iQpxbK8ATms3OQaX3YQUeTMGCxlaK4=
 cloud.google.com/go/vision/v2 v2.5.0 h1:TQHxRqvLMi19azwm3qYuDbEzZWmiKJNTpGbkNsfRCik=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
 cloud.google.com/go/vmmigration v1.3.0 h1:A2Tl2ZmwMRpvEmhV2ibISY85fmQR+Y5w9a0PlRz5P3s=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
 cloud.google.com/go/vmwareengine v0.1.0 h1:JMPZaOT/gIUxVlTqSl/QQ32Y2k+r0stNeM1NSqhVP9o=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
 cloud.google.com/go/vpcaccess v1.5.0 h1:woHXXtnW8b9gLFdWO9HLPalAddBQ9V4LT+1vjKwR3W8=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
 cloud.google.com/go/webrisk v1.7.0 h1:ypSnpGlJnZSXbN9a13PDmAYvVekBLnGKxQ3Q9SMwnYY=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
 cloud.google.com/go/websecurityscanner v1.4.0 h1:y7yIFg/h/mO+5Y5aCOtVAnpGUOgqCH5rXQ2Oc8Oq2+g=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
 cloud.google.com/go/workflows v1.9.0 h1:7Chpin9p50NTU8Tb7qk+I11U/IwVXmDhEoSsdccvInE=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8 h1:V8krnnfGj4pV65YLUm3C0/8bl7V5Nry2Pwvy3ru/wLc=
 github.com/Azure/azure-sdk-for-go v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
 github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
@@ -130,7 +281,10 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
+github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
@@ -139,6 +293,7 @@ github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7I
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
 github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
+github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg=
 github.com/apparentlymart/go-cidr v1.0.1 h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=
 github.com/apparentlymart/go-textseg/v12 v12.0.0 h1:bNEQyAGak9tojivJNkoqWErVCQbjdL7GzRt3F8NvfJ0=
 github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
@@ -146,7 +301,10 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
 github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
+github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/aslakhellesoy/gox v1.0.100/go.mod h1:AJl542QsKKG96COVsv0N74HHzVQgDIQPceVUh1aeU2M=
 github.com/aws/aws-sdk-go v1.43.16 h1:Y7wBby44f+tINqJjw5fLH3vA+gFq4uMITIKqditwM14=
 github.com/aws/aws-sdk-go-v2 v1.16.3 h1:0W1TSJ7O6OzwuEvIXAtJGvOeQ0SGAhcpxPN2/NK5EhM=
@@ -167,6 +325,9 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 h1:LCQKnopq2t4oQS3VKivlYTzAHCTJZ
 github.com/aws/aws-sdk-go-v2/service/sso v1.11.4 h1:Uw5wBybFQ1UeA9ts0Y07gbv0ncZnIAyw858tDW0NP2o=
 github.com/aws/aws-sdk-go-v2/service/sts v1.16.4 h1:+xtV90n3abQmgzk1pS++FdxZTrPEDgQng6e4/56WR2A=
 github.com/aws/smithy-go v1.11.2 h1:eG/N+CcUMAvsdffgMvjMKwfyDzIkjM6pfxMJ8Mzc6mE=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
 github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
@@ -174,9 +335,14 @@ github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJm
 github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
 github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
+github.com/bufbuild/buf v1.12.0/go.mod h1:tgUhc+/7jgJPn6EbcS8oIFOiSRewkyO/9j+Acbby9SQ=
+github.com/bufbuild/connect-go v1.4.1/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I=
+github.com/bufbuild/protocompile v0.1.0/go.mod h1:ix/MMMdsT3fzxfw91dvbfzKW3fRRnuPCP47kpAm5m/4=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
 github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
 github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
+github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
@@ -184,8 +350,14 @@ github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
 github.com/chzyer/readline v1.5.0 h1:lSwwFrbNviGePhkewF1az4oLmcwqCZijQ2/Wi3BGHAI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
 github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
 github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=
 github.com/containerd/btrfs v1.0.0 h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA=
@@ -226,13 +398,21 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk=
 github.com/docker/cli-docs-tool v0.5.1 h1:jIk/cCZurZERhALPVKhqlNxTQGxn2kcI+56gE57PQXg=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
+github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae h1:UTOyRlLeWJrZx+ynml6q6qzZ1uDkJe/0Z5CMZRbEIJg=
 github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
 github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
@@ -257,6 +437,7 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754 h1:tpom+2CJmpzAWj5
 github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
 github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.3.1+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
@@ -264,14 +445,44 @@ github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8
 github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
+github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20221203041831-ce31453925ec/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
 github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
 github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
 github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
@@ -283,11 +494,19 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d h1:ibbzF2InxMOS+lLCphY9PHNKPURDUBNKaG6ErSq8gJQ=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU=
+github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
+github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
+github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 h1:kgvybwEeu0SXktbB2y3uLHX9lklLo+nzUwh59A3jzQc=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
+github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
@@ -295,26 +514,42 @@ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
 github.com/hashicorp/go-memdb v1.3.0/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
 github.com/hashicorp/go-memdb v1.3.2 h1:RBKHOsnSszpU6vxq80LzC2BaQjuuvoyaQbkLTf7V7g8=
 github.com/hashicorp/go-memdb v1.3.2/go.mod h1:Mluclgwib3R93Hk5fxEfiRhB+6Dar64wWh71LpNSe3g=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
 github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
 github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
 github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
 github.com/hashicorp/hcl/v2 v2.8.2 h1:wmFle3D1vu0okesm8BTLVDyJ6/OL9DCLUwn0b2OptiY=
+github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2 h1:rcanfLhLDA8nozr/K289V1zcntHr3V+SHlXwzz1ZI2g=
 github.com/intel/goresctrl v0.2.0 h1:JyZjdMQu9Kl/wLXe9xA6s1X+tF6BWsQPFGJMEeCfWzE=
@@ -323,6 +558,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
 github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/jstemmer/go-junit-report v1.0.0 h1:8X1gzZpR+nVQLAht+L/foqOeX2l9DTZoaIPbEQHxsds=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8 h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=
@@ -340,14 +576,27 @@ github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
 github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k=
 github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-sqlite3 v1.6.0 h1:TDwTWbeII+88Qy55nWlof0DclgAtI4LqGujkYMzmQII=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
@@ -365,28 +614,43 @@ github.com/oklog/ulid/v2 v2.1.0 h1:+9lhoxAP56we25tyYETBBY1YLA2SaoLvUFgrP2miPJU=
 github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
 github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
 github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/pquerna/cachecontrol v0.1.0 h1:yJMy84ti9h/+OEWa752kBTKv4XC30OtVVHYv/8cTqKc=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
+github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/sagikazarmark/crypt v0.8.0 h1:xtk0uUHVWVsRBdEUGYBym4CXbcllXky2M7Qlwsf8C0Y=
+github.com/sagikazarmark/crypt v0.9.0 h1:fipzMFW34hFUEc4D7fsLQFtE7yElkpgyS2zruedRdZk=
+github.com/sagikazarmark/crypt v0.9.0/go.mod h1:RnH7sEhxfdnPm1z+XMgSLjWTEIjyK4z2dw6+4vHTMuo=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646 h1:RpforrEYXWkmGwJHIGnLZ3tTWStkjVVstwzNGqxX2Ds=
 github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
@@ -395,12 +659,15 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6
 github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
 github.com/stefanberger/go-pkcs11uri
v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc= github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/urfave/cli v1.22.4 h1:u7tSpNPPswAFymm8IehJhy4uJMlUuU/GmqSkvJ1InXA= github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= @@ -417,29 +684,208 @@ github.com/zclconf/go-cty v1.10.0 h1:mp9ZXQeIcN8kAwuqorjH+Q+njbJKjLrvB2yIh4q7U+0 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= +go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A= +go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= +go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= +go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= +go.etcd.io/etcd/client/v2 v2.305.6 h1:fIDR0p4KMjw01MJMfUIDWdQbjo06PD6CeYM5z4EHLi0= +go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0= go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= +go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E= +go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= go.opentelemetry.io/otel/exporters/jaeger v1.4.1 h1:VHCK+2yTZDqDaVXj7JH2Z/khptuydo6C0ttBh2bxAbc= go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= +go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11/go.mod 
h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20221211140036-ad323defaf05 h1:T8EldfGCcveFMewH5xAYxxoX3PSQMrsechlUGVFlQBU= +golang.org/x/exp v0.0.0-20230131160201-f062dba9d201 h1:BEABXpNXLEz0WxtA+6CQIz2xkg80e+1zrhWyMcq8VzE= golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod 
h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.107.0 h1:I2SlFjD8ZWabaIFOfeEDg3pf0BHJDh6iYQ1ic3Yu/UU= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto 
v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod 
h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/dancannon/gorethink.v3 v3.0.5 h1:/g7PWP7zUS6vSNmHSDbjCHQh1Rqn8Jy6zSMQxAsBSMQ=
diff --git a/scripts/docker/nginx-oss/entrypoint.sh b/scripts/docker/nginx-oss/entrypoint.sh
index ab2dd356c8..c777a66123 100644
--- a/scripts/docker/nginx-oss/entrypoint.sh
+++ b/scripts/docker/nginx-oss/entrypoint.sh
@@ -21,8 +21,7 @@
 echo "starting nginx ..."
 
 nginx_pid=$!
-cp /agent/nginx-agent.conf /etc/nginx-agent/nginx-agent.conf
-cat /etc/nginx-agent/nginx-agent.conf
+cat /etc/nginx-agent/nginx-agent.conf;
 
 # start nginx-agent, pass args
 echo "starting nginx-agent ..."
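The entrypoint change above is worth a note: the script no longer copies a staged nginx-agent.conf over /etc/nginx-agent/nginx-agent.conf. The installed package now provides that file (see the Dockerfile change below), and the script only cats it for visibility before starting the agent. For readers who want the sequencing spelled out, here is a minimal Go sketch of the same startup flow; it is purely illustrative, not part of this PR, with only the binary names and config path taken from the script.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Start nginx in the background, as the script does before nginx_pid=$!.
	nginx := exec.Command("nginx")
	nginx.Stdout, nginx.Stderr = os.Stdout, os.Stderr
	if err := nginx.Start(); err != nil {
		fmt.Fprintln(os.Stderr, "starting nginx:", err)
		os.Exit(1)
	}
	fmt.Println("starting nginx ... pid:", nginx.Process.Pid)

	// The config is no longer copied into place here; the package installed in
	// the image provides it. Like the script, we only print it for visibility.
	if conf, err := os.ReadFile("/etc/nginx-agent/nginx-agent.conf"); err == nil {
		os.Stdout.Write(conf)
	}

	// Start nginx-agent in the foreground, passing through any args.
	fmt.Println("starting nginx-agent ...")
	agent := exec.Command("nginx-agent", os.Args[1:]...)
	agent.Stdout, agent.Stderr = os.Stdout, os.Stderr
	if err := agent.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "nginx-agent exited:", err)
		os.Exit(1)
	}
}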
diff --git a/scripts/docker/nginx-oss/ubuntu/Dockerfile b/scripts/docker/nginx-oss/ubuntu/Dockerfile
index d434807dac..f622d568d9 100644
--- a/scripts/docker/nginx-oss/ubuntu/Dockerfile
+++ b/scripts/docker/nginx-oss/ubuntu/Dockerfile
@@ -2,11 +2,12 @@ ARG BASE_IMAGE
 FROM ${BASE_IMAGE} as install-nginx
 LABEL maintainer="NGINX Agent Maintainers "
 
-ARG NGINX_CONF
+ARG DEBIAN_FRONTEND=noninteractive
 ARG ENTRY_POINT
+ARG PACKAGE_NAME
 
 WORKDIR /agent
-COPY ./ /agent
+COPY ./build /agent/build
 COPY $ENTRY_POINT /agent/entrypoint.sh
 
 RUN set -x \
@@ -30,12 +31,8 @@ EXPOSE 80 443
 
 ENTRYPOINT ["/agent/entrypoint.sh"]
-
 FROM install-nginx as install-agent
 ARG PACKAGE_NAME
-ARG AGENT_CONF
-COPY $AGENT_CONF /agent/nginx-agent.conf
-COPY --from=install-nginx /agent/nginx-agent.conf /etc/nginx-agent/nginx-agent.conf
-RUN apt install -y -f /agent/build/$PACKAGE_NAME.deb
+RUN apt install -y /agent/build/$PACKAGE_NAME.deb
diff --git a/scripts/tools.go b/scripts/tools.go
index c54fd5e847..89260ccdf0 100644
--- a/scripts/tools.go
+++ b/scripts/tools.go
@@ -14,4 +14,5 @@ import (
 	_ "github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc"
 	_ "github.com/go-swagger/go-swagger/cmd/swagger"
 	_ "github.com/bufbuild/buf/cmd/buf"
+	_ "github.com/goreleaser/nfpm/v2/cmd/nfpm"
 )
\ No newline at end of file
diff --git a/sdk/go.mod b/sdk/go.mod
index ea9dcf0c3f..c422c8430d 100644
--- a/sdk/go.mod
+++ b/sdk/go.mod
@@ -9,20 +9,19 @@ require (
 	github.com/nginxinc/nginx-go-crossplane v0.4.1
 	github.com/sirupsen/logrus v1.9.0
 	github.com/stretchr/testify v1.8.1
-	google.golang.org/grpc v1.51.0
+	google.golang.org/grpc v1.52.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/kr/pretty v0.3.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/stretchr/objx v0.5.0 // indirect
 	golang.org/x/net v0.5.0 // indirect
 	golang.org/x/sys v0.4.0 // indirect
 	golang.org/x/text v0.6.0 // indirect
-	google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect
+	google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
 	google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/sdk/go.sum b/sdk/go.sum
index 103f34d0d3..a5207899a3 100644
--- a/sdk/go.sum
+++ b/sdk/go.sum
@@ -44,7 +44,6 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
@@ -188,15 +187,15 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70=
-google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
diff --git a/sdk/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/sdk/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index f34a38e4e9..a6b5081888 100644
--- a/sdk/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/sdk/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2022 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v3.21.9
 // source: google/rpc/status.proto
 
 package status
@@ -48,11 +48,13 @@ type Status struct {
 	sizeCache protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+	// The status code, which should be an enum value of
+	// [google.rpc.Code][google.rpc.Code].
 	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
 	// A developer-facing error message, which should be in English. Any
 	// user-facing error message should be localized and sent in the
-	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+	// by the client.
 	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
 	// A list of messages that carry the error details. There is a common set of
 	// message types for APIs to use.
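The status.pb.go churn above is regeneration only (a newer protoc and reflowed comments); the fields and wire format are unchanged. As a hedged sketch of how this message is normally consumed, the snippet below builds and inspects a google.rpc.Status through grpc-go's status package. The NotFound code, the messages, and the ErrorInfo detail are illustrative assumptions, not code from this repository.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func lookupConfig() error {
	// status.New wraps the google.rpc.Status message generated in status.pb.go.
	st := status.New(codes.NotFound, "nginx config not found")
	// Details carries typed protos; ErrorInfo is one of the common detail
	// types the Status.details comment refers to.
	st, err := st.WithDetails(&errdetails.ErrorInfo{Reason: "MISSING_CONFIG"})
	if err != nil {
		return err // attaching a detail fails only if it cannot be marshalled
	}
	return st.Err()
}

func main() {
	err := lookupConfig()
	if st, ok := status.FromError(err); ok {
		fmt.Println(st.Code(), st.Message(), len(st.Details())) // NotFound nginx config not found 1
	}
}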
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/sdk/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go similarity index 87% rename from vendor/google.golang.org/grpc/balancer/grpclb/state/state.go rename to sdk/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go index 4ecfa1c215..cece046be3 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/sdk/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go @@ -16,9 +16,9 @@ * */ -// Package state declares grpclb types to be set by resolvers wishing to pass -// information to grpclb via resolver.State Attributes. -package state +// Package grpclbstate declares grpclb types to be set by resolvers wishing to +// pass information to grpclb via resolver.State Attributes. +package grpclbstate import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.state") +const key = keyType("grpc.grpclb.grpclbstate") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/sdk/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/sdk/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f281..66d141fce7 100644 --- a/sdk/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/sdk/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. diff --git a/sdk/vendor/google.golang.org/grpc/clientconn.go b/sdk/vendor/google.golang.org/grpc/clientconn.go index 422639c79d..0456689045 100644 --- a/sdk/vendor/google.golang.org/grpc/clientconn.go +++ b/sdk/vendor/google.golang.org/grpc/clientconn.go @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -1268,6 +1274,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. 
hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) diff --git a/sdk/vendor/google.golang.org/grpc/dialoptions.go b/sdk/vendor/google.golang.org/grpc/dialoptions.go index 9372dc322e..8f5b536f11 100644 --- a/sdk/vendor/google.golang.org/grpc/dialoptions.go +++ b/sdk/vendor/google.golang.org/grpc/dialoptions.go @@ -116,8 +116,9 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +128,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/sdk/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/sdk/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26d1..85e3ff2816 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/sdk/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } diff --git a/sdk/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/sdk/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c5149..d51302e65c 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/sdk/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/sdk/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/sdk/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1..c6e08221ff 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/sdk/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, diff --git a/sdk/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/sdk/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702cac..1609116877 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/sdk/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/sdk/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/sdk/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f48f..aaa9c859a3 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/sdk/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ 
-209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,6 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil - } - }() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +570,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -662,11 +657,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err + if err == errStreamDrain { // errStreamDrain need not close transport + return nil } - // Other errors(errStreamDrain) need not close transport. - return nil + return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -764,7 +758,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +793,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +811,14 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + l.framer.writer.Flush() + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. 
+ return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +847,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/sdk/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/sdk/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fb272235d8..ebe8bfe330 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/sdk/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed time-out: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req diff --git a/sdk/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/sdk/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d518b07e16..3e582a2853 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/sdk/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. 
+ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -238,8 +242,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } conn.Close() } }(conn) @@ -314,6 +321,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -440,10 +448,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -702,6 +708,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -934,6 +952,9 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. 
t.onClose() @@ -986,11 +1007,14 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) diff --git a/sdk/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/sdk/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647bc..37e089bc84 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/sdk/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -453,7 +452,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } if !isGRPC || headerError { @@ -463,7 +462,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -503,7 +502,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +513,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +529,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +549,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +596,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +629,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +839,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +882,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1146,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1162,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1189,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1288,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1312,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1347,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/sdk/vendor/google.golang.org/grpc/internal/transport/transport.go b/sdk/vendor/google.golang.org/grpc/internal/transport/transport.go index 2e615ee20c..6cff20c8e0 100644 --- a/sdk/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/sdk/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -701,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. 
RemoteAddr() net.Addr diff --git a/sdk/vendor/google.golang.org/grpc/pickfirst.go b/sdk/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0a2..b3a55481b9 100644 --- a/sdk/vendor/google.golang.org/grpc/pickfirst.go +++ b/sdk/vendor/google.golang.org/grpc/pickfirst.go @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/sdk/vendor/google.golang.org/grpc/regenerate.sh b/sdk/vendor/google.golang.org/grpc/regenerate.sh index 99db79fafc..a6f26c8ab0 100644 --- a/sdk/vendor/google.golang.org/grpc/regenerate.sh +++ b/sdk/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/sdk/vendor/google.golang.org/grpc/server.go b/sdk/vendor/google.golang.org/grpc/server.go index f4dde72b41..2808b7c83e 100644 --- a/sdk/vendor/google.golang.org/grpc/server.go +++ b/sdk/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. 
The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
- var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/sdk/vendor/google.golang.org/grpc/stream.go b/sdk/vendor/google.golang.org/grpc/stream.go index 960c3e33df..0f8e6c0149 100644 --- a/sdk/vendor/google.golang.org/grpc/stream.go +++ b/sdk/vendor/google.golang.org/grpc/stream.go @@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( diff --git a/sdk/vendor/google.golang.org/grpc/version.go b/sdk/vendor/google.golang.org/grpc/version.go index 2198e7098d..243e06e8d3 100644 --- a/sdk/vendor/google.golang.org/grpc/version.go +++ b/sdk/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.51.0" +const Version = "1.52.0" diff --git a/sdk/vendor/google.golang.org/grpc/vet.sh b/sdk/vendor/google.golang.org/grpc/vet.sh index bd8e0cdb33..1d03c09148 100644 --- a/sdk/vendor/google.golang.org/grpc/vet.sh +++ b/sdk/vendor/google.golang.org/grpc/vet.sh @@ -121,8 +121,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
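The WriteBufferSize and ReadBufferSize doc updates in server.go above now spell out that zero or negative values disable the corresponding buffer entirely. A minimal usage sketch with the public grpc package (the listen address is illustrative, not from this PR):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	// Both buffers default to 32KB; passing zero (or, per the updated docs,
	// any negative value) disables them, so each write goes straight to the
	// underlying connection and the framer reads from the conn directly.
	srv := grpc.NewServer(
		grpc.WriteBufferSize(0),
		grpc.ReadBufferSize(0),
	)
	log.Fatal(srv.Serve(lis))
}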
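The chainUnaryInterceptors/chainStreamInterceptors rewrite above drops the mutable state struct in favor of a recursive helper: getChainUnaryHandler(interceptors, curr, ...) returns the handler that interceptor curr should invoke as its next step, bottoming out at the real method handler. A self-contained sketch of the same recursion over simplified stand-in types (grpc's *UnaryServerInfo parameter is elided for brevity):

package main

import (
	"context"
	"fmt"
)

// Stand-ins for grpc.UnaryHandler and grpc.UnaryServerInterceptor.
type handler func(ctx context.Context, req interface{}) (interface{}, error)
type interceptor func(ctx context.Context, req interface{}, next handler) (interface{}, error)

// chain mirrors getChainUnaryHandler: it returns the "next" handler seen by
// the interceptor at position curr, terminating at the final method handler.
func chain(ics []interceptor, curr int, final handler) handler {
	if curr == len(ics)-1 {
		return final
	}
	return func(ctx context.Context, req interface{}) (interface{}, error) {
		return ics[curr+1](ctx, req, chain(ics, curr+1, final))
	}
}

func main() {
	logging := func(name string) interceptor {
		return func(ctx context.Context, req interface{}, next handler) (interface{}, error) {
			fmt.Println("enter", name)
			defer fmt.Println("exit", name)
			return next(ctx, req)
		}
	}
	ics := []interceptor{logging("outer"), logging("inner")}
	final := func(ctx context.Context, req interface{}) (interface{}, error) {
		return fmt.Sprintf("handled %v", req), nil
	}
	// As in chainUnaryInterceptors: invoke interceptor 0 with the chained
	// handler for position 0. Prints enter outer, enter inner, exit inner,
	// exit outer, then the response.
	resp, err := ics[0](context.Background(), "req", chain(ics, 0, final))
	fmt.Println(resp, err)
}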
diff --git a/sdk/vendor/modules.txt b/sdk/vendor/modules.txt index 37a08ed717..83cace2aef 100644 --- a/sdk/vendor/modules.txt +++ b/sdk/vendor/modules.txt @@ -19,8 +19,6 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/google/go-cmp v0.5.9 -## explicit; go 1.13 # github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware/retry @@ -65,17 +63,17 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 +# google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.51.0 +# google.golang.org/grpc v1.52.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/grpclb/grpclbstate google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index b5500c48a5..c48a179c96 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 +FROM ubuntu:22.04 LABEL maintainer="NGINX Docker Maintainers " @@ -12,6 +12,9 @@ LABEL maintainer="NGINX Docker Maintainers " # Download certificate and key from the customer portal (https://account.f5.com) # and copy to the build context +# https://askubuntu.com/questions/909277/avoiding-user-interaction-with-tzdata-when-installing-certbot-in-a-docker-contai +ARG DEBIAN_FRONTEND=noninteractive + RUN --mount=type=secret,id=nginx-crt,dst=nginx-repo.crt \ --mount=type=secret,id=nginx-key,dst=nginx-repo.key \ set -x \ diff --git a/test/integration/api/api_test.go b/test/integration/api/api_test.go index d22f7951ef..c68c3b32de 100644 --- a/test/integration/api/api_test.go +++ b/test/integration/api/api_test.go @@ -18,7 +18,7 @@ import ( ) const ( - port = 9091 + PORT = 9091 ) func setupTestContainer(t *testing.T) { @@ -47,7 +47,7 @@ func TestAPI_Nginx(t *testing.T) { client := resty.New() client.SetRetryCount(3).SetRetryWaitTime(50 * time.Millisecond).SetRetryMaxWaitTime(200 * time.Millisecond) - url := fmt.Sprintf("http://localhost:%d/nginx", port) + url := fmt.Sprintf("http://localhost:%d/nginx", PORT) resp, err := client.R().EnableTrace().Get(url) assert.NoError(t, err) @@ -74,14 +74,13 @@ func TestAPI_Nginx(t *testing.T) { assert.Equal(t, detail[1], "/usr/local/nginx/conf/nginx.conf") } } - } func TestAPI_Metrics(t *testing.T) { setupTestContainer(t) client := resty.New() - url := fmt.Sprintf("http://localhost:%d/metrics", port) + url := fmt.Sprintf("http://localhost:%d/metrics", PORT) client.SetRetryCount(5).SetRetryWaitTime(5 * time.Second).SetRetryMaxWaitTime(5 * time.Second) client.AddRetryCondition( func(r *resty.Response, err error) bool { diff --git a/test/integration/api/docker-compose.yml b/test/integration/api/docker-compose.yml index e90a874281..8fa09bf179 100644 --- a/test/integration/api/docker-compose.yml +++ b/test/integration/api/docker-compose.yml @@ -13,10 +13,11 @@ services: args: PACKAGE_NAME: ${PACKAGE_NAME} BASE_IMAGE: ${BASE_IMAGE} - AGENT_CONF: 
"./test/testdata/configs/integration/api/nginx-agent.conf" - NGINX_CONF: "./test/testdata/configs/integration/api/nginx.conf" ENTRY_POINT: "./scripts/docker/nginx-oss/entrypoint.sh" ports: - 9091:9091 networks: - monitoring + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf + - ./nginx-agent.conf:/etc/nginx-agent/nginx-agent.conf diff --git a/test/testdata/configs/integration/api/nginx-agent.conf b/test/integration/api/nginx-agent.conf similarity index 100% rename from test/testdata/configs/integration/api/nginx-agent.conf rename to test/integration/api/nginx-agent.conf diff --git a/test/testdata/configs/integration/api/nginx.conf b/test/integration/api/nginx.conf similarity index 92% rename from test/testdata/configs/integration/api/nginx.conf rename to test/integration/api/nginx.conf index 431090788e..ba9cac5705 100644 --- a/test/testdata/configs/integration/api/nginx.conf +++ b/test/integration/api/nginx.conf @@ -35,7 +35,7 @@ http { #gzip on; server { - listen 80; + listen 8080; server_name localhost; #charset koi8-r; @@ -43,7 +43,7 @@ http { #access_log logs/host.access.log main; location / { - root /usr/local/nginx/html; + root /usr/share/nginx/html; index index.html index.htm; } @@ -62,7 +62,7 @@ http { # error_page 500 502 503 504 /50x.html; location = /50x.html { - root /usr/local/nginx/html; + root /usr/share/nginx/html; } } } diff --git a/test/integration/go.mod b/test/integration/go.mod index 422eaa24f6..02d17a8b3c 100644 --- a/test/integration/go.mod +++ b/test/integration/go.mod @@ -14,9 +14,9 @@ require ( ) require ( - cloud.google.com/go/compute/metadata v0.2.1 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/AlecAivazis/survey/v2 v2.3.6 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/buger/goterm v1.0.4 // indirect @@ -31,10 +31,10 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/distribution/v3 v3.0.0-20221201083218-92d136e113cf // indirect github.com/docker/buildx v0.9.1 // indirect - github.com/docker/cli v20.10.21+incompatible // indirect + github.com/docker/cli v20.10.23+incompatible // indirect github.com/docker/compose/v2 v2.15.0 // indirect github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.21+incompatible // indirect + github.com/docker/docker v23.0.0+incompatible // indirect github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect github.com/docker/go-connections v0.4.0 // indirect @@ -46,6 +46,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/mock v1.6.0 // indirect @@ -66,7 +67,7 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.15.13 // indirect + github.com/klauspost/compress v1.15.15 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.12 // 
indirect @@ -104,7 +105,7 @@ require ( github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 // indirect github.com/spf13/cobra v1.6.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.14.0 // indirect + github.com/spf13/viper v1.15.0 // indirect github.com/theupdateframework/notary v0.7.0 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.5.0 // indirect @@ -118,14 +119,14 @@ require ( go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect - go.opentelemetry.io/otel v1.11.2 // indirect + go.opentelemetry.io/otel v1.12.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 // indirect go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.11.1 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // indirect + go.opentelemetry.io/otel/sdk v1.12.0 // indirect + go.opentelemetry.io/otel/trace v1.12.0 // indirect go.opentelemetry.io/proto/otlp v0.12.0 // indirect golang.org/x/crypto v0.5.0 // indirect golang.org/x/mod v0.7.0 // indirect @@ -138,8 +139,8 @@ require ( golang.org/x/time v0.1.0 // indirect golang.org/x/tools v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect - google.golang.org/grpc v1.51.0 // indirect + google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect + google.golang.org/grpc v1.52.0 // indirect google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/test/integration/go.sum b/test/integration/go.sum index 301be504ee..a521b38fa1 100644 --- a/test/integration/go.sum +++ b/test/integration/go.sum @@ -20,9 +20,9 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -37,8 +37,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AlecAivazis/survey/v2 v2.3.6 h1:NvTuVHISgTHEHeBFqt6BHOe4Ny/NwGZr7w+F8S9ziyw= github.com/AlecAivazis/survey/v2 v2.3.6/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -211,7 +210,7 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.3.1+incompatible h1:0/KbAdpx3UXAx1kEOWHJeOkpbgRFGHVgv+CFIY7dBJI= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -348,8 +347,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0= -github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -457,7 +456,7 @@ github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HD github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/diff 
v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -521,7 +520,7 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -534,8 +533,8 @@ github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= -github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -552,7 +551,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/testcontainers/testcontainers-go v0.17.0 h1:UdKSw2DJXinlS6ijbFb4VHpQzD+EfTwcTq1/19a+8PU= github.com/testcontainers/testcontainers-go v0.17.0/go.mod h1:n5trpHrB68IUelEqGNC8VipaCo6jOGusU44kIK11XRs= @@ -605,8 +604,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U= go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= +go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 
h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= @@ -618,12 +616,10 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1 go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs= -go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys= +go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= +go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= @@ -936,8 +932,8 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -955,8 +951,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/test/integration/install/install_uninstall_test.go b/test/integration/install/install_uninstall_test.go index f29f08b220..c2b00ce7f9 100644 --- a/test/integration/install/install_uninstall_test.go +++ b/test/integration/install/install_uninstall_test.go @@ -32,14 +32,15 @@ var ( ) func setupTestContainer(t *testing.T) { + ctx := context.Background() comp, err := compose.NewDockerCompose("docker-compose.yml") assert.NoError(t, err, "NewDockerComposeAPI()") t.Cleanup(func() { - assert.NoError(t, comp.Down(context.Background(), compose.RemoveOrphans(true), compose.RemoveImagesLocal), "compose.Down()") + assert.NoError(t, comp.Down(ctx, compose.RemoveOrphans(true), compose.RemoveImagesLocal), "compose.Down()") }) - ctx, cancel := context.WithCancel(context.Background()) + ctxCancel, cancel := context.WithCancel(ctx) t.Cleanup(cancel) require.NoError(t, comp.WaitForService("agent", wait.ForHTTP("/")).WithEnv( @@ -47,9 +48,9 @@ func setupTestContainer(t *testing.T) { "PACKAGE_NAME": os.Getenv("PACKAGE_NAME"), "BASE_IMAGE": os.Getenv("BASE_IMAGE"), }, - ).Up(ctx, compose.Wait(true)), "compose.Up()") + ).Up(ctxCancel, compose.Wait(true)), "compose.Up()") - agentContainer, err = comp.ServiceContainer(context.Background(), "agent") + agentContainer, err = comp.ServiceContainer(ctxCancel, "agent") require.NoError(t, err) } diff --git a/test/integration/vendor/github.com/klauspost/compress/.goreleaser.yml b/test/integration/vendor/github.com/klauspost/compress/.goreleaser.yml index 0af08e65e6..a2bf06e94f 100644 --- a/test/integration/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/test/integration/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@latest + - go install mvdan.cc/garble@v0.7.2 builds: - diff --git a/test/integration/vendor/github.com/klauspost/compress/README.md b/test/integration/vendor/github.com/klauspost/compress/README.md index 7469b40fa4..63f2cd5b25 100644 --- a/test/integration/vendor/github.com/klauspost/compress/README.md +++ b/test/integration/vendor/github.com/klauspost/compress/README.md @@ -9,7 +9,6 @@ This package provides various compression algorithms. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) @@ -17,6 +16,17 @@ This package provides various compression algorithms. 
# changelog +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + * Oct 26, 2022 (v1.15.12) * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 diff --git a/test/integration/vendor/github.com/klauspost/compress/fse/compress.go b/test/integration/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c6..dac97e58a2 100644 --- a/test/integration/vendor/github.com/klauspost/compress/fse/compress.go +++ b/test/integration/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/test/integration/vendor/github.com/klauspost/compress/huff0/bitreader.go b/test/integration/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9da..e36d9742f9 100644 --- a/test/integration/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/test/integration/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/test/integration/vendor/github.com/klauspost/compress/huff0/compress.go b/test/integration/vendor/github.com/klauspost/compress/huff0/compress.go index d9223a91ef..cdc94856f2 100644 --- a/test/integration/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/test/integration/vendor/github.com/klauspost/compress/huff0/compress.go @@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. 
huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
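A hedged, standalone illustration of that packing (the field layout is taken from the accessors that follow: count in bits 0-31, parent in bits 32-47, symbol in bits 48-55, nbBits in bits 56-63; the type name here is a stand-in, not the vendored one):

package main

import "fmt"

// packedNode reproduces huff0's nodeElt layout for illustration only.
type packedNode uint64

func makePackedNode(count uint32, symbol byte) packedNode {
	return packedNode(count) | packedNode(symbol)<<48
}

func (e packedNode) count() uint32  { return uint32(e) }
func (e packedNode) parent() uint16 { return uint16(e >> 32) }
func (e packedNode) symbol() byte   { return byte(e >> 48) }
func (e packedNode) nbBits() uint8  { return uint8(e >> 56) }

func main() {
	// 1<<30 is the "fake entry, strong barrier" count used in buildCTable.
	e := makePackedNode(1<<30, 'A')
	fmt.Printf("%#x count=%d symbol=%c parent=%d nbBits=%d\n",
		uint64(e), e.count(), e.symbol(), e.parent(), e.nbBits())
	// A single 64-bit load or store now moves all four logical fields at
	// once, which is the gain the comment above describes.
}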
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/test/integration/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/test/integration/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2ce..c4c7ab2d1f 100644 --- a/test/integration/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/test/integration/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || 
(br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := 
br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + 
CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - 
MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE 
error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/blockdec.go b/test/integration/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6b9929ddf5..2445bb4fe5 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -192,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -210,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/test/integration/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 1f5d41bf41..f6a240970d 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -152,7 +152,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:size], in[size:] h.HeaderSize += int(size) - switch size { + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -182,7 +182,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) - switch fcsSize { + switch len(b) { case 1: h.FrameContentSize = uint64(b[0]) case 2: diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/decoder.go b/test/integration/vendor/github.com/klauspost/compress/zstd/decoder.go index 30459cd3fb..7113e69ee3 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -40,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
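
Two patterns in the hunks above are worth calling out. In the huff0 assembly, the SETLT/ORB pair that tracked stream exhaustion becomes CMPQ followed by ADCB $+0, DL: the unsigned compare leaves its borrow in the carry flag, and the add-with-carry folds it straight into the exhausted byte without a partial-register write. On the Go side, the decodeheader.go hunks dispatch on len(b) instead of the requested size, so a buffer shorter than asked for can never be indexed past its end. A minimal runnable sketch of that little-endian reassembly pattern, with a helper name made up for illustration:

    package main

    import "fmt"

    // littleEndian mirrors the switch bodies above: it only consumes the bytes
    // actually present in b, so a short read yields a short (not corrupt) value.
    func littleEndian(b []byte) (v uint64) {
        for i, c := range b {
            v |= uint64(c) << (8 * uint(i))
        }
        return v
    }

    func main() {
        fmt.Println(littleEndian([]byte{0x01, 0x02})) // 513 == 0x0201
    }
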
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -341,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -495,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -865,13 +851,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -953,3 +934,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/test/integration/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69c..07a90dd7af 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. 
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithDecoderDicts(dicts ...[]byte) DOption {
 	return func(o *decoderOptions) error {
 		for _, b := range dicts {
@@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
 			if err != nil {
 				return err
 			}
-			o.dicts = append(o.dicts, *d)
+			o.dicts = append(o.dicts, d)
 		}
 		return nil
 	}
 }
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+	return func(o *decoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+		return nil
+	}
+}
+
 // WithDecoderMaxWindow allows to set a maximum window size for decodes.
 // This allows rejecting packets that will cause big memory usage.
 // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/dict.go b/test/integration/vendor/github.com/klauspost/compress/zstd/dict.go
index b2725f77b5..66a95c18ef 100644
--- a/test/integration/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/test/integration/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -21,6 +21,9 @@ type dict struct {
 const dictMagic = "\x37\xa4\x30\xec"
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
+
 // ID returns the dictionary id or 0 if d is nil.
 func (d *dict) ID() uint32 {
 	if d == nil {
diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/test/integration/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 6015f498af..8e15be2f7f 100644
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"math/bits"
 	"runtime"
 	"strings"
 )
@@ -305,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
 }
 // WithEncoderDict allows to register a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
 // The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithEncoderDict(dict []byte) EOption {
 	return func(o *encoderOptions) error {
 		d, err := loadDict(dict)
@@ -316,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
 		return nil
 	}
 }
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
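
WithDecoderDictRaw just above, and WithEncoderDictRaw added below in encoder_options.go, register arbitrary bytes as initial history rather than a structured "zstd --train" dictionary; the caller-chosen id has to match on both sides, mirroring how zstd --patch-from tags its source file. A hedged round-trip sketch: the id and the history bytes are arbitrary, and it assumes the encoder stamps the raw dictionary id into the frame header so the decoder's setDict lookup finds it.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        history := bytes.Repeat([]byte("0123456789"), 100) // arbitrary shared bytes
        payload := append([]byte(nil), history...)         // compresses well against the history

        var buf bytes.Buffer
        enc, _ := zstd.NewWriter(&buf, zstd.WithEncoderDictRaw(1, history))
        enc.Write(payload)
        enc.Close()

        // A nil reader is fine when the decoder is only used via DecodeAll.
        dec, _ := zstd.NewReader(nil, zstd.WithDecoderDictRaw(1, history))
        defer dec.Close()
        out, err := dec.DecodeAll(buf.Bytes(), nil)
        fmt.Println(bytes.Equal(out, payload), err)
    }
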
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/framedec.go b/test/integration/vendor/github.com/klauspost/compress/zstd/framedec.go index 6cb3b4e55f..d8e8a05bd7 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error { return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/test/integration/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 52e5703c26..b94993a072 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -320,10 +320,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -617,10 +613,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -897,10 +889,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -1152,10 +1140,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1389,8 +1373,7 @@ loop_finished: MOVQ ctx+0(FP), AX 
MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1402,8 +1385,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1747,8 +1729,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1760,8 +1741,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET diff --git a/test/integration/vendor/github.com/klauspost/compress/zstd/zstd.go b/test/integration/vendor/github.com/klauspost/compress/zstd/zstd.go index b1886f7c74..5ffa82f5ac 100644 --- a/test/integration/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/test/integration/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -72,7 +72,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. diff --git a/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 2f3ab92494..7bd161e48a 100644 --- a/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/error_details.proto package errdetails @@ -36,6 +36,112 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. 
The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. @@ -61,7 +167,7 @@ type RetryInfo struct { func (x *RetryInfo) Reset() { *x = RetryInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -74,7 +180,7 @@ func (x *RetryInfo) String() string { func (*RetryInfo) ProtoMessage() {} func (x *RetryInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -87,7 +193,7 @@ func (x *RetryInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. 
func (*RetryInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} } func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { @@ -112,7 +218,7 @@ type DebugInfo struct { func (x *DebugInfo) Reset() { *x = DebugInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +231,7 @@ func (x *DebugInfo) String() string { func (*DebugInfo) ProtoMessage() {} func (x *DebugInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +244,7 @@ func (x *DebugInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. func (*DebugInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} } func (x *DebugInfo) GetStackEntries() []string { @@ -178,7 +284,7 @@ type QuotaFailure struct { func (x *QuotaFailure) Reset() { *x = QuotaFailure{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -191,7 +297,7 @@ func (x *QuotaFailure) String() string { func (*QuotaFailure) ProtoMessage() {} func (x *QuotaFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -204,7 +310,7 @@ func (x *QuotaFailure) ProtoReflect() protoreflect.Message { // Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. func (*QuotaFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} } func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { @@ -214,111 +320,6 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { return nil } -// Describes the cause of the error with structured details. -// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -type ErrorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. 
Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match - // /[A-Z0-9_]+/. - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. - Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ErrorInfo) Reset() { - *x = ErrorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorInfo) ProtoMessage() {} - -func (x *ErrorInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. -func (*ErrorInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} -} - -func (x *ErrorInfo) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *ErrorInfo) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ErrorInfo) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - // Describes what preconditions have failed. // // For example, if an RPC failed because it required the Terms of Service to be @@ -495,7 +496,8 @@ type ResourceInfo struct { ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). 
// For example, "user:" or "project: google.protobuf.Duration - 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link @@ -1089,7 +1125,7 @@ func file_google_rpc_error_details_proto_init() { } if !protoimpl.UnsafeEnabled { file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { + switch v := v.(*ErrorInfo); i { case 0: return &v.state case 1: @@ -1101,7 +1137,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { + switch v := v.(*RetryInfo); i { case 0: return &v.state case 1: @@ -1113,7 +1149,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { + switch v := v.(*DebugInfo); i { case 0: return &v.state case 1: @@ -1125,7 +1161,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { + switch v := v.(*QuotaFailure); i { case 0: return &v.state case 1: @@ -1208,7 +1244,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QuotaFailure_Violation); i { case 0: return &v.state diff --git a/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e9..a6b5081888 100644 --- a/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/test/integration/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. 
Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/test/integration/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/test/integration/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go similarity index 87% rename from test/integration/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go rename to test/integration/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go index 4ecfa1c215..cece046be3 100644 --- a/test/integration/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/test/integration/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go @@ -16,9 +16,9 @@ * */ -// Package state declares grpclb types to be set by resolvers wishing to pass -// information to grpclb via resolver.State Attributes. -package state +// Package grpclbstate declares grpclb types to be set by resolvers wishing to +// pass information to grpclb via resolver.State Attributes. +package grpclbstate import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.state") +const key = keyType("grpc.grpclb.grpclbstate") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/test/integration/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/test/integration/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f281..66d141fce7 100644 --- a/test/integration/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/test/integration/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. 
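
The genproto regeneration above moves ErrorInfo to slot 0, which renumbers msgTypes indices and depIdxs throughout the generated file; none of that ordering is visible through the public API, so code that attaches or extracts error details keeps working. A sketch of the usual pattern via google.golang.org/grpc/status, with placeholder reason and domain strings:

    package main

    import (
        "fmt"

        "google.golang.org/genproto/googleapis/rpc/errdetails"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func main() {
        st := status.New(codes.FailedPrecondition, "pubsub.googleapis.com is disabled")
        st, err := st.WithDetails(&errdetails.ErrorInfo{
            Reason: "API_DISABLED",
            Domain: "googleapis.com",
        })
        if err != nil {
            panic(err)
        }

        // Receiving side: recover the structured detail from the error value.
        if s, ok := status.FromError(st.Err()); ok {
            for _, d := range s.Details() {
                if info, ok := d.(*errdetails.ErrorInfo); ok {
                    fmt.Println(info.GetReason(), info.GetDomain())
                }
            }
        }
    }
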
diff --git a/test/integration/vendor/google.golang.org/grpc/clientconn.go b/test/integration/vendor/google.golang.org/grpc/clientconn.go index 422639c79d..0456689045 100644 --- a/test/integration/vendor/google.golang.org/grpc/clientconn.go +++ b/test/integration/vendor/google.golang.org/grpc/clientconn.go @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -1268,6 +1274,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) diff --git a/test/integration/vendor/google.golang.org/grpc/dialoptions.go b/test/integration/vendor/google.golang.org/grpc/dialoptions.go index 9372dc322e..8f5b536f11 100644 --- a/test/integration/vendor/google.golang.org/grpc/dialoptions.go +++ b/test/integration/vendor/google.golang.org/grpc/dialoptions.go @@ -116,8 +116,9 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +128,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/test/integration/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/test/integration/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index a66024d23e..8e29a62f16 100644 --- a/test/integration/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/test/integration/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
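
The dialoptions.go doc changes above clarify that zero or negative buffer sizes disable buffering entirely rather than being rejected. A hedged sketch of opting out of both buffers; the target address and insecure credentials are placeholders for illustration:

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // With both sizes at zero, reads and writes go to the underlying
        // connection without an intermediate buffer (per the docs above).
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithWriteBufferSize(0),
            grpc.WithReadBufferSize(0),
        )
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }
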
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/test/integration/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/test/integration/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26d1..85e3ff2816 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } diff --git a/test/integration/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/test/integration/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c5149..d51302e65c 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/test/integration/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/test/integration/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1..c6e08221ff 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
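
The dns_resolver.go hunk above (and the unix resolver hunk below) reads the authority out of Target.URL.Host instead of the deprecated Target.Authority field; both name the same component of the parsed dial target. A standalone illustration with net/url, using an example dns target string:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        u, err := url.Parse("dns://8.8.8.8:53/example.com:443")
        if err != nil {
            panic(err)
        }
        fmt.Println(u.Scheme) // dns
        fmt.Println(u.Host)   // 8.8.8.8:53, the authority the resolver consults
        fmt.Println(u.Path)   // /example.com:443, the endpoint to resolve
    }
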
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, diff --git a/test/integration/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/test/integration/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702cac..1609116877 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/test/integration/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/test/integration/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f48f..aaa9c859a3 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,6 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. 
%v", err) - } - err = nil - } - }() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +570,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -662,11 +657,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err + if err == errStreamDrain { // errStreamDrain need not close transport + return nil } - // Other errors(errStreamDrain) need not close transport. - return nil + return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -764,7 +758,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +793,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +811,14 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + l.framer.writer.Flush() + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +847,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/test/integration/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/test/integration/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fb272235d8..ebe8bfe330 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed time-out: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req diff --git a/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d518b07e16..3e582a2853 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
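Note the shift in contract in the NewServerHandlerTransport hunks above: the constructor now writes the HTTP error response itself before returning, which is why the ServeHTTP hunk later in this diff drops its own http.Error call. A simplified sketch of that pattern (hypothetical newHandlerTransport helper, not the vendored code):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// newHandlerTransport sketches the new contract: on a bad request it
// reports the problem to the client itself and returns an error, so the
// caller must not write to w again.
func newHandlerTransport(w http.ResponseWriter, r *http.Request) (http.Flusher, error) {
	if r.ProtoMajor != 2 {
		msg := "gRPC requires HTTP/2"
		http.Error(w, msg, http.StatusBadRequest)
		return nil, errors.New(msg)
	}
	if r.Method != http.MethodPost {
		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
		http.Error(w, msg, http.StatusBadRequest)
		return nil, errors.New(msg)
	}
	f, ok := w.(http.Flusher)
	if !ok {
		msg := "gRPC requires a ResponseWriter supporting http.Flusher"
		http.Error(w, msg, http.StatusInternalServerError)
		return nil, errors.New(msg)
	}
	return f, nil
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if _, err := newHandlerTransport(w, r); err != nil {
			return // the error page has already been written
		}
		// ... serve the stream ...
	})
}
```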
+ userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -238,8 +242,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } conn.Close() } }(conn) @@ -314,6 +321,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -440,10 +448,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -702,6 +708,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -934,6 +952,9 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. 
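The NewStream hunk above gives the resolver-returned ServerName precedence over CallHdr.Host when forming the :authority header, and applies the override on a shallow copy so the caller's CallHdr is never mutated. The shape of that copy-on-write pattern, with stand-in types:

```go
package main

import "fmt"

type callHdr struct {
	Host   string
	Method string
}

// authorityFor applies the precedence rule: a non-empty ServerName from
// the resolver address wins over the Host the caller supplied, and the
// override happens on a copy so the original header stays untouched.
func authorityFor(hdr *callHdr, serverName string) *callHdr {
	if serverName == "" {
		return hdr
	}
	newHdr := *hdr // copy-on-write: never mutate the caller's struct
	newHdr.Host = serverName
	return &newHdr
}

func main() {
	orig := &callHdr{Host: "proxy.local", Method: "/pkg.Svc/Call"}
	eff := authorityFor(orig, "backend.example.com")
	fmt.Println(eff.Host, orig.Host) // backend.example.com proxy.local
}
```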
t.onClose() @@ -986,11 +1007,14 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) diff --git a/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647bc..37e089bc84 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -453,7 +452,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } if !isGRPC || headerError { @@ -463,7 +462,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -503,7 +502,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +513,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +529,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +549,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +596,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +629,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +839,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +882,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
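Replacing drainChan with a grpcsync.Event removes the need to guard against closing a channel twice: Fire is safe to call repeatedly, while a second close on a bare chan panics. A fire-once event in the spirit of that primitive (a sketch, not the internal implementation):

```go
package main

import (
	"fmt"
	"sync"
)

// Event fires at most once. Fire may be called any number of times, but
// the underlying channel is closed exactly once, which a raw chan used as
// a drain signal cannot guarantee.
type Event struct {
	once sync.Once
	c    chan struct{}
}

func NewEvent() *Event { return &Event{c: make(chan struct{})} }

func (e *Event) Fire()                 { e.once.Do(func() { close(e.c) }) }
func (e *Event) Done() <-chan struct{} { return e.c }
func (e *Event) HasFired() bool {
	select {
	case <-e.c:
		return true
	default:
		return false
	}
}

func main() {
	ev := NewEvent()
	ev.Fire()
	ev.Fire() // safe: the second call is a no-op
	<-ev.Done()
	fmt.Println(ev.HasFired()) // true
}
```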
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1146,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1162,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1189,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1288,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1312,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1347,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/test/integration/vendor/google.golang.org/grpc/internal/transport/transport.go b/test/integration/vendor/google.golang.org/grpc/internal/transport/transport.go index 2e615ee20c..6cff20c8e0 100644 --- a/test/integration/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/test/integration/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -701,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. 
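The keepalive hunk above is a behavioral fix: hitting the maximum connection age now enqueues a closeConnection message on the control buffer instead of calling Close directly, letting the loopy writer flush queued frames before the connection drops. A toy version of that typed control-message dispatch:

```go
package main

import "fmt"

// Control instructions are plain structs; the writer loop dispatches on
// their type. closeConnection carries no data: its meaning is "flush and
// exit", expressed by the handler returning an error that stops the loop.
type closeConnection struct{}
type ping struct{ ack bool }

func handle(msg interface{}) error {
	switch m := msg.(type) {
	case ping:
		fmt.Println("write ping frame, ack:", m.ack)
		return nil
	case closeConnection:
		fmt.Println("flush framer")
		return fmt.Errorf("transport closing") // exiting the loop closes the conn
	default:
		return fmt.Errorf("unknown control message type %T", m)
	}
}

func main() {
	for _, m := range []interface{}{ping{ack: true}, closeConnection{}} {
		if err := handle(m); err != nil {
			fmt.Println("loop exits:", err)
			break
		}
	}
}
```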
- Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/test/integration/vendor/google.golang.org/grpc/pickfirst.go b/test/integration/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0a2..b3a55481b9 100644 --- a/test/integration/vendor/google.golang.org/grpc/pickfirst.go +++ b/test/integration/vendor/google.golang.org/grpc/pickfirst.go @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/test/integration/vendor/google.golang.org/grpc/regenerate.sh b/test/integration/vendor/google.golang.org/grpc/regenerate.sh index 99db79fafc..a6f26c8ab0 100644 --- a/test/integration/vendor/google.golang.org/grpc/regenerate.sh +++ b/test/integration/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/test/integration/vendor/google.golang.org/grpc/server.go b/test/integration/vendor/google.golang.org/grpc/server.go index f4dde72b41..2808b7c83e 100644 --- a/test/integration/vendor/google.golang.org/grpc/server.go +++ b/test/integration/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. 
-// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
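Per the clarified WriteBufferSize/ReadBufferSize docs, zero or negative values now explicitly disable buffering. Configuring a server that way uses the public grpc API (whether it helps depends on the workload; small buffers trade syscall count against latency):

```go
package main

import "google.golang.org/grpc"

func main() {
	s := grpc.NewServer(
		// Zero (or any negative value) disables the write buffer, so each
		// write goes directly on the underlying connection.
		grpc.WriteBufferSize(0),
		// Likewise for reads: the framer accesses the conn directly.
		grpc.ReadBufferSize(0),
	)
	defer s.Stop()
}
```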
- var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/test/integration/vendor/google.golang.org/grpc/stream.go b/test/integration/vendor/google.golang.org/grpc/stream.go index 960c3e33df..0f8e6c0149 100644 --- a/test/integration/vendor/google.golang.org/grpc/stream.go +++ b/test/integration/vendor/google.golang.org/grpc/stream.go @@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( diff --git a/test/integration/vendor/google.golang.org/grpc/version.go b/test/integration/vendor/google.golang.org/grpc/version.go index 2198e7098d..243e06e8d3 100644 --- a/test/integration/vendor/google.golang.org/grpc/version.go +++ b/test/integration/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.51.0" +const Version = "1.52.0" diff --git a/test/integration/vendor/google.golang.org/grpc/vet.sh b/test/integration/vendor/google.golang.org/grpc/vet.sh index bd8e0cdb33..1d03c09148 100644 --- a/test/integration/vendor/google.golang.org/grpc/vet.sh +++ b/test/integration/vendor/google.golang.org/grpc/vet.sh @@ -121,8 +121,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
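The interceptor-chaining rewrite in server.go drops the shared mutable state struct in favor of a recursive handler constructor: interceptor i receives a handler that invokes interceptor i+1, and the last interceptor receives the real handler. The same construction on simplified types (context and info parameters elided):

```go
package main

import "fmt"

// Handler and Interceptor mirror the shape of grpc's UnaryHandler and
// UnaryServerInterceptor, reduced to strings for brevity.
type Handler func(req string) string
type Interceptor func(req string, next Handler) string

func chain(interceptors []Interceptor, final Handler) Handler {
	if len(interceptors) == 0 {
		return final
	}
	return func(req string) string {
		return interceptors[0](req, getChainHandler(interceptors, 0, final))
	}
}

// getChainHandler builds, for interceptor curr, the handler that invokes
// interceptor curr+1; the last interceptor gets the final handler itself.
func getChainHandler(interceptors []Interceptor, curr int, final Handler) Handler {
	if curr == len(interceptors)-1 {
		return final
	}
	return func(req string) string {
		return interceptors[curr+1](req, getChainHandler(interceptors, curr+1, final))
	}
}

func main() {
	logging := func(req string, next Handler) string { return "log(" + next(req) + ")" }
	auth := func(req string, next Handler) string { return "auth(" + next(req) + ")" }
	h := chain([]Interceptor{logging, auth}, func(req string) string { return req })
	fmt.Println(h("req")) // log(auth(req))
}
```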
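Several hunks in this vendor bump (the dns and unix resolvers, and the xds check in stream.go) migrate from the deprecated target.Scheme/target.Authority fields to the parsed target.URL. How a dial target decomposes under url.Parse, shown with an illustrative target:

```go
package main

import (
	"fmt"
	"net/url"
)

// A dial target such as "dns://8.8.8.8/grpc.example.com:443" parses into
// a url.URL whose Host field carries what gRPC previously exposed as
// target.Authority; the resolver changes above read URL.Host instead.
func main() {
	u, err := url.Parse("dns://8.8.8.8/grpc.example.com:443")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme) // dns
	fmt.Println(u.Host)   // 8.8.8.8 (the authority: a custom DNS server here)
	fmt.Println(u.Path)   // /grpc.example.com:443 (the endpoint)
}
```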
diff --git a/test/integration/vendor/modules.txt b/test/integration/vendor/modules.txt index 341bffff51..30d642f59a 100644 --- a/test/integration/vendor/modules.txt +++ b/test/integration/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go/compute/metadata v0.2.1 +# cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 # github.com/AlecAivazis/survey/v2 v2.3.6 ## explicit; go 1.13 @@ -326,7 +326,7 @@ github.com/json-iterator/go # github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 ## explicit github.com/kballard/go-shellquote -# github.com/klauspost/compress v1.15.13 +# github.com/klauspost/compress v1.15.15 ## explicit; go 1.17 github.com/klauspost/compress github.com/klauspost/compress/fse @@ -533,7 +533,7 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.14.0 +# github.com/spf13/viper v1.15.0 ## explicit; go 1.17 # github.com/stretchr/testify v1.8.1 ## explicit; go 1.13 @@ -741,20 +741,20 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 +# google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef ## explicit; go 1.19 google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.51.0 +# google.golang.org/grpc v1.52.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/grpclb/grpclbstate google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz diff --git a/test/performance/advanced-metrics/Makefile b/test/performance/advanced-metrics/Makefile index a40bc5c814..132fd1cc5b 100644 --- a/test/performance/advanced-metrics/Makefile +++ b/test/performance/advanced-metrics/Makefile @@ -1,3 +1,5 @@ +include ../../../Makefile.containers + SHELL:=/bin/bash #uncomment when you want to use go modules @@ -13,11 +15,11 @@ benchmark_build: benchmark_run: benchmark_build source .test_env && \ - docker-compose -f docker-compose.yml up -d --build --remove-orphans + ${CONTAINER_COMPOSE} -f docker-compose.yml up -d --build --remove-orphans benchmark_stop: source .test_env && \ - docker-compose -f docker-compose.yml down + ${CONTAINER_COMPOSE} -f docker-compose.yml down benchmark_poc_build: CGO_ENABLED=0 GOOS=linux go build -o bin/agent-with-poc fake_agent_poc/*.go @@ -25,7 +27,7 @@ benchmark_poc_build: benchmark_poc_run: benchmark_poc_build source .test_env && \ - docker-compose -f docker-compose-with-poc.yml up --build --remove-orphans + ${CONTAINER_COMPOSE} -f docker-compose-with-poc.yml up --build --remove-orphans benchmark_avr_build: CGO_ENABLED=0 GOOS=linux go build -o bin/avr_harness avr_harness/*.go @@ -33,8 +35,8 @@ benchmark_avr_build: benchmark_avr_run: benchmark_avr_build source .test_env && \ - docker-compose -f docker-compose-with-avr.yml up -d --build --remove-orphans + ${CONTAINER_COMPOSE} -f docker-compose-with-avr.yml up -d --build --remove-orphans benchmark_avr_stop: source .test_env && \ - docker-compose -f docker-compose-with-avr.yml down \ No newline 
at end of file + ${CONTAINER_COMPOSE} -f docker-compose-with-avr.yml down diff --git a/test/performance/go.mod b/test/performance/go.mod index 2245d8e213..0bd27b8965 100644 --- a/test/performance/go.mod +++ b/test/performance/go.mod @@ -15,7 +15,7 @@ require ( github.com/sirupsen/logrus v1.9.0 github.com/stretchr/testify v1.8.1 go.uber.org/atomic v1.10.0 - google.golang.org/grpc v1.51.0 + google.golang.org/grpc v1.52.0 ) require ( @@ -31,7 +31,7 @@ require ( github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/klauspost/compress v1.15.13 // indirect + github.com/klauspost/compress v1.15.15 // indirect github.com/klauspost/cpuid/v2 v2.1.0 // indirect github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -46,24 +46,23 @@ require ( github.com/nginxinc/nginx-prometheus-exporter v0.10.0 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/orcaman/concurrent-map v1.0.0 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect - github.com/rs/cors v1.8.2 // indirect + github.com/rs/cors v1.8.3 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shirou/gopsutil/v3 v3.22.7 // indirect - github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/cobra v1.6.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.14.0 // indirect + github.com/spf13/viper v1.15.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect + github.com/subosito/gotenv v1.4.2 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.5.0 // indirect github.com/trivago/grok v1.0.0 // indirect @@ -75,7 +74,7 @@ require ( golang.org/x/sys v0.4.0 // indirect golang.org/x/text v0.6.0 // indirect golang.org/x/time v0.1.0 // indirect - google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect + google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/mcuadros/go-syslog.v2 v2.3.0 // indirect diff --git a/test/performance/go.sum b/test/performance/go.sum index f190626298..b124d5209e 100644 --- a/test/performance/go.sum +++ b/test/performance/go.sum @@ -189,8 +189,8 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.13 h1:NFn1Wr8cfnenSJSA46lLq4wHCcBzKTSjnBIexDMMOV0= -github.com/klauspost/compress v1.15.13/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= 
+github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0= github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -253,10 +253,8 @@ github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5h github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY= github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= -github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= +github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -294,8 +292,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= +github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= @@ -309,8 +307,8 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= +github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= @@ -319,8 
+317,8 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= -github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -336,8 +334,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= -github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= @@ -677,8 +675,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -695,8 +693,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= 
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/test/performance/vendor/github.com/klauspost/compress/s2/decode.go b/test/performance/vendor/github.com/klauspost/compress/s2/decode.go index 27c0f3c2c4..00c5cc72c2 100644 --- a/test/performance/vendor/github.com/klauspost/compress/s2/decode.go +++ b/test/performance/vendor/github.com/klauspost/compress/s2/decode.go @@ -952,7 +952,11 @@ func (r *Reader) ReadSeeker(random bool, index []byte) (*ReadSeeker, error) { // Seek allows seeking in compressed data. func (r *ReadSeeker) Seek(offset int64, whence int) (int64, error) { if r.err != nil { - return 0, r.err + if !errors.Is(r.err, io.EOF) { + return 0, r.err + } + // Reset on EOF + r.err = nil } if offset == 0 && whence == io.SeekCurrent { return r.blockStart + int64(r.i), nil diff --git a/test/performance/vendor/github.com/pelletier/go-toml/.dockerignore b/test/performance/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b5883475d..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/test/performance/vendor/github.com/pelletier/go-toml/.gitignore b/test/performance/vendor/github.com/pelletier/go-toml/.gitignore deleted file mode 100644 index e6ba63a5c5..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/test/performance/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/test/performance/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 98b9893d37..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcomed! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! 
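The s2 decode.go fix above lets Seek recover from a sticky io.EOF instead of failing forever once the stream has been drained. The same idea applied to a plain io.ReadSeeker (a sketch; the vendored ReadSeeker tracks block offsets as well):

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// stickyReader records read errors; before the fix, any recorded error,
// including io.EOF, made every later Seek fail.
type stickyReader struct {
	rs  io.ReadSeeker
	err error
}

func (r *stickyReader) Read(p []byte) (int, error) {
	n, err := r.rs.Read(p)
	if err != nil {
		r.err = err
	}
	return n, err
}

func (r *stickyReader) Seek(offset int64, whence int) (int64, error) {
	if r.err != nil {
		if !errors.Is(r.err, io.EOF) {
			return 0, r.err // genuine errors remain sticky
		}
		r.err = nil // reset on EOF: seeking back is a valid way to resume
	}
	return r.rs.Seek(offset, whence)
}

func main() {
	r := &stickyReader{rs: strings.NewReader("hello")}
	_, _ = io.ReadAll(r) // drain the stream; Read records io.EOF
	pos, err := r.Seek(0, io.SeekStart)
	fmt.Println(pos, err) // 0 <nil> (before the fix: 0 EOF)
}
```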
- -The documentation is present in the [README][readme] and thorough the -source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and other track them down and -fix by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied with regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When creating a pull requests, all tests will be ran on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. 
- -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/test/performance/vendor/github.com/pelletier/go-toml/Dockerfile b/test/performance/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb01666..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/test/performance/vendor/github.com/pelletier/go-toml/Makefile b/test/performance/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503aea6..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/test/performance/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/test/performance/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a2f..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/test/performance/vendor/github.com/pelletier/go-toml/README.md b/test/performance/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 7399e04bf6..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# go-toml - -Go library for the [TOML](https://toml.io/) format. 
- -This library supports TOML version -[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) - -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - - -## Development status - -**ℹ️ Consider go-toml v2!** - -The next version of go-toml is in [active development][v2-dev], and -[nearing completion][v2-map]. - -Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], -and [much faster][v2-bench]. If you only need reading and writing TOML documents -(majority of cases), those features are implemented and the API unlikely to -change. - -The remaining features will be added shortly. While pull-requests are welcome on -v1, no active development is expected on it. When v2.0.0 is released, v1 will be -deprecated. - -👉 [go-toml v2][v2] - -[v2]: https://github.com/pelletier/go-toml/tree/v2 -[v2-map]: https://github.com/pelletier/go-toml/discussions/506 -[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 -[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed -[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at -[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). 
- -## Tools - -Go-toml provides three handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also available as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . -``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! - -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/test/performance/vendor/github.com/pelletier/go-toml/SECURITY.md b/test/performance/vendor/github.com/pelletier/go-toml/SECURITY.md deleted file mode 100644 index b2f21cfc92..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -Use this section to tell people about which versions of your project are -currently being supported with security updates. - -| Version | Supported | -| ---------- | ------------------ | -| Latest 2.x | :white_check_mark: | -| All 1.x | :x: | -| All 0.x | :x: | - -## Reporting a Vulnerability - -Email a vulnerability report to `security@pelletier.codes`. Make sure to include -as many details as possible to reproduce the vulnerability. This is a -side-project: I will try to get back to you as quickly as possible, time -permitting in my personal life. Providing a working patch helps very much! diff --git a/test/performance/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/test/performance/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index 4af198b4d9..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,188 +0,0 @@ -trigger: -- master - -stages: -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' 
- - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.16: - goVersion: '1.16' - imageName: 'ubuntu-latest' - mac 1.16: - goVersion: '1.16' - imageName: 'macOS-latest' - windows 1.16: - goVersion: '1.16' - imageName: 'windows-latest' - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' -- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.16 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' 
- addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/test/performance/vendor/github.com/pelletier/go-toml/benchmark.sh b/test/performance/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040fa..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! `hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/doc.go b/test/performance/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32b3..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/test/performance/vendor/github.com/pelletier/go-toml/example-crlf.toml b/test/performance/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index 780d9c68f2..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? 
- -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/test/performance/vendor/github.com/pelletier/go-toml/example.toml b/test/performance/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b8f..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/test/performance/vendor/github.com/pelletier/go-toml/fuzz.go b/test/performance/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d35..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/fuzz.sh b/test/performance/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c446..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/test/performance/vendor/github.com/pelletier/go-toml/keysparsing.go b/test/performance/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b24..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. -// The input supports double quotation and single quotation, -// but escape sequences are not supported. Lexers must unescape them beforehand. 
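Since `parseKey` is unexported, the following is a hypothetical illustration (not runnable from outside the package) of the mapping described in the comment above, inferred from the function body that follows:

```go
// Hypothetical inputs and the key groups parseKey would return:
//
//   parseKey(`a.b.c`)     // -> []string{"a", "b", "c"}
//   parseKey(`a."b.c".d`) // -> []string{"a", "b.c", "d"} (quotes stripped, inner dot kept)
//   parseKey(`'literal'`) // -> []string{"literal"}       (single quotes, no unescaping)
//   parseKey(``)          // -> error: "empty key"
```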
-func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' { - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/lexer.go b/test/performance/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index 313908e3e0..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,1031 +0,0 @@ -// TOML lexer. 
-// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - 
} - - if next == eof { - l.next() - break - } - - if next == '+' || next == '-' { - return l.lexNumber - } - - if isDigit(next) { - return l.lexDateTimeOrNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { - // Could be either a date/time, or a digit. - // The options for date/times are: - // YYYY-... => date or date-time - // HH:... => time - // Anything else should be a number. - - lookAhead := l.peekString(5) - if len(lookAhead) < 3 { - return l.lexNumber() - } - - for idx, r := range lookAhead { - if !isDigit(r) { - if idx == 2 && r == ':' { - return l.lexDateTimeOrTime() - } - if idx == 4 && r == '-' { - return l.lexDateTimeOrTime() - } - return l.lexNumber() - } - } - return l.lexNumber() -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - // 07:32:00 - // 00:32:00.999999 - - // we already know those two are digits - l.next() - l.next() - - // Got 2 digits. At that point it could be either a time or a date(-time). - - r := l.next() - if r == ':' { - return l.lexTime() - } - - return l.lexDateTime() -} - -func (l *tomlLexer) lexDateTime() tomlLexStateFn { - // This state accepts an offset date-time, a local date-time, or a local date. 
- // - // v--- cursor - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - - // date - - // already checked by lexRvalue - l.next() // digit - l.next() // - - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid month digit in date: %c", r) - } - } - - r := l.next() - if r != '-' { - return l.errorf("expected - to separate month of a date, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid day digit in date: %c", r) - } - } - - l.emit(tokenLocalDate) - - r = l.peek() - - if r == eof { - - return l.lexRvalue - } - - if r != ' ' && r != 'T' { - return l.errorf("incorrect date/time separation character: %c", r) - } - - if r == ' ' { - lookAhead := l.peekString(3)[1:] - if len(lookAhead) < 2 { - return l.lexRvalue - } - for _, r := range lookAhead { - if !isDigit(r) { - return l.lexRvalue - } - } - } - - l.skip() // skip the T or ' ' - - // time - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' { - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - - return l.lexTimeOffset - -} - -func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { - // potential offset - - // Z - // -07:00 - // +07:00 - // nothing - - r := l.peek() - - if r == 'Z' { - l.next() - l.emit(tokenTimeOffset) - } else if r == '+' || r == '-' { - l.next() - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time offset: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time offset hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time offset: %c", r) - } - } - - l.emit(tokenTimeOffset) - } - - return l.lexRvalue -} - -func (l *tomlLexer) lexTime() tomlLexStateFn { - // v--- cursor - // 07:32:00 - // 00:32:00.999999 - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r := l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' 
{ - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - return l.lexRvalue - -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
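The practical effect of the two string paths — the literal-string lexer above (no unescaping) and the escape-aware `lexStringAsString` below — is easiest to see through the public API. A hedged sketch; the keys and values are illustrative only:

```go
// Literal strings ('...') pass through verbatim; basic strings ("...") are unescaped.
config, _ := toml.Load(`
basic   = "line1\nline2"
literal = 'C:\Users\nodejs'
unicode = "\u00E9"
`)
fmt.Println(config.Get("basic"))   // two lines: \n decoded by the basic-string path
fmt.Println(config.Get("literal")) // C:\Users\nodejs, backslashes kept as-is
fmt.Println(config.Get("unicode")) // é, decoded from the four-digit \u escape
```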
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the 
key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' 
{ - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/localtime.go b/test/performance/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index 9dfe4b9e6c..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,287 +0,0 @@ -// Implementation of TOML's local date/time. -// -// Copied over from Google's civil to avoid pulling all the Google dependencies. -// Originals: -// https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go -// Changes: -// * Renamed files from civil* to localtime*. -// * Package changed from civil to toml. -// * 'Local' prefix added to all structs. -// -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package toml - -import ( - "fmt" - "time" -) - -// A LocalDate represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type LocalDate struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// LocalDateOf returns the LocalDate in which a time occurs in that time's location. -func LocalDateOf(t time.Time) LocalDate { - var d LocalDate - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents. 
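For orientation, a short sketch of how the `LocalDate` API defined below fits together (imports and error handling elided; values illustrative):

```go
// Parse a TOML local date and do simple calendar arithmetic on it.
d, _ := toml.ParseLocalDate("1979-05-27")
later := d.AddDays(7)           // seven days forward
fmt.Println(later.String())     // "1979-06-03" (RFC3339 full-date)
fmt.Println(later.DaysSince(d)) // 7
fmt.Println(d.In(time.UTC))     // 1979-05-27 00:00:00 +0000 UTC
```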
-func ParseLocalDate(s string) (LocalDate, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return LocalDate{}, err - } - return LocalDateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d LocalDate) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. -func (d LocalDate) IsValid() bool { - return LocalDateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.LocalDate, even when time.LocalDate returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d LocalDate) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d LocalDate) AddDays(n int) LocalDate { - return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. -func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. 
-// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). -func (t LocalTime) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseLocalTime. -func (t *LocalTime) UnmarshalText(data []byte) error { - var err error - *t, err = ParseLocalTime(string(data)) - return err -} - -// A LocalDateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type LocalDateTime struct { - Date LocalDate - Time LocalTime -} - -// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub. - -// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location. -func LocalDateTimeOf(t time.Time) LocalDateTime { - return LocalDateTime{ - Date: LocalDateOf(t), - Time: LocalTimeOf(t), - } -} - -// ParseLocalDateTime parses a string and returns the LocalDateTime it represents. -// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseLocalTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseLocalDateTime(s string) (LocalDateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return LocalDateTime{}, err - } - } - return LocalDateTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalDate. -func (dt LocalDateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt LocalDateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the LocalDateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.LocalDate. 
For example, if loc is America/Indiana/Vincennes, then -// both -// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.LocalDateTime{ -// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}}, -// civil.LocalTime{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt LocalDateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). -func (dt LocalDateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/marshal.go b/test/performance/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 5712730498..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1308 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagLiteral = "literal" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - literal bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - literal string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - literal: tagLiteral, - defaultValue: tagDefault, -} - -type MarshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical MarshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. 
- OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return 
mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). - -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-uint64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.LocalTime time.LocalTime{}, pointers to same - -For additional flexibility, use the Encoder API. -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts - annotation - line int - col int - order MarshalOrder - promoteAnon bool - compactComments bool - indentation string -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - annotation: annotationDefault, - line: 0, - col: 1, - order: OrderAlphabetical, - indentation: " ", - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. 
-// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -// Order allows to change in which order fields will be written to the output stream. -func (e *Encoder) Order(ord MarshalOrder) *Encoder { - e.order = ord - return e -} - -// Indentation allows to change indentation when marshalling. -func (e *Encoder) Indentation(indent string) *Encoder { - e.indentation = indent - return e -} - -// SetTagName allows changing default tag "toml" -func (e *Encoder) SetTagName(v string) *Encoder { - e.tag = v - return e -} - -// SetTagComment allows changing default tag "comment" -func (e *Encoder) SetTagComment(v string) *Encoder { - e.comment = v - return e -} - -// SetTagCommented allows changing default tag "commented" -func (e *Encoder) SetTagCommented(v string) *Encoder { - e.commented = v - return e -} - -// SetTagMultiline allows changing default tag "multiline" -func (e *Encoder) SetTagMultiline(v string) *Encoder { - e.multiline = v - return e -} - -// PromoteAnonymous allows to change how anonymous struct fields are marshaled. -// Usually, they are marshaled as if the inner exported fields were fields in -// the outer struct. However, if an anonymous struct field is given a name in -// its TOML tag, it is treated like a regular struct field with that name. -// rather than being anonymous. -// -// In case anonymous promotion is enabled, all anonymous structs are promoted -// and treated like regular struct fields. -func (e *Encoder) PromoteAnonymous(promote bool) *Encoder { - e.promoteAnon = promote - return e -} - -// CompactComments removes the new line before each comment in the tree. -func (e *Encoder) CompactComments(cc bool) *Encoder { - e.compactComments = cc - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - // Check if indentation is valid - for _, char := range e.indentation { - if !isSpace(char) { - return []byte{}, fmt.Errorf("invalid indentation: must only contains space or tab characters") - } - } - - mtype := reflect.TypeOf(v) - if mtype == nil { - return []byte{}, errors.New("nil cannot be marshaled to TOML") - } - - switch mtype.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Ptr: - if mtype.Elem().Kind() != reflect.Struct { - return []byte{}, errors.New("Only pointer to struct can be marshaled to TOML") - } - if reflect.ValueOf(v).IsNil() { - return []byte{}, errors.New("nil pointer cannot be marshaled to TOML") - } - default: - return []byte{}, errors.New("Only a struct or map can be marshaled to TOML") - } - - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - if isTextMarshaler(mtype) { - return callTextMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false) - - return buf.Bytes(), err -} - -// Create next tree with a position based on Encoder.line -func (e *Encoder) nextTree() *Tree { - return newTreeWithPosition(Position{Line: e.line, Col: 1}) -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := e.nextTree() - switch mtype.Kind() { - case 
reflect.Struct: - switch mval.Interface().(type) { - case Tree: - reflect.ValueOf(tval).Elem().Set(mval) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef, e.annotation) - if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon { - e.appendTree(tval, tree) - } else { - val = e.wrapTomlValue(val, tval) - tval.SetPathWithOptions([]string{opts.name}, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - Literal: opts.literal, - }, val) - } - } - } - } - case reflect.Map: - keys := mval.MapKeys() - if e.order == OrderPreserve && len(keys) > 0 { - // Sorting []reflect.Value is not straightforward. - // - // OrderPreserve only yields deterministic results when strings are - // used as map keys. - typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of TOML trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): -
return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } - return nil -} - -// Create a toml value with the current line number as the position line -func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { - _, isTree := val.(*Tree) - _, isTreeS := val.([]*Tree) - if isTree || isTreeS { - e.line++ - return val - } - - ret := &tomlValue{ - value: val, - position: Position{ - e.line, - parent.position.Col, - }, - } - e.line++ - return ret -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. -// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts - tagName string - strict bool - visitor visitorState -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - tagName: tagFieldName, - } -} - -// Decode reads a TOML-encoded value from its input -// and unmarshals it into the value pointed to by v. -// -// See the documentation for Marshal for details.
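As a sketch of the decoding half documented above (struct and field names hypothetical): keys missing from the input fall back to their default tags, and the Decoder form adds stream input plus the Strict option defined just below.

package main

import (
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml"
)

type Server struct {
	Host    string `toml:"host"`
	Port    int    `toml:"port" default:"8080"`
	Verbose bool   `toml:"verbose" default:"false"`
}

func main() {
	doc := `host = "example.com"`

	var s Server
	if err := toml.Unmarshal([]byte(doc), &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // Port and Verbose come from the default tags.

	// The Decoder reads from a stream; Strict(true) turns keys without a
	// corresponding struct field into an error instead of ignoring them.
	var t Server
	err := toml.NewDecoder(strings.NewReader(doc + "\nunknown = 1")).Strict(true).Decode(&t)
	fmt.Println(err) // reports the undecoded "unknown" key
}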
-func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - -// Strict allows changing to strict decoding. Any fields that are found in the -// input data and do not have a corresponding struct member cause an error. -func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. 
- if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - // Check if the provided number has a non-numeric extension. - var hasExtension bool - if len(opts.defaultValue) > 0 { - lastChar := opts.defaultValue[len(opts.defaultValue)-1] - if lastChar < '0' || lastChar > '9' { - hasExtension = true - } - } - // If the value is a time.Duration with extension, parse as duration. - // If the value is an int64 or a time.Duration without extension, parse as number. 
- if hasExtension && mvalf.Type().String() == "time.Duration" { - val, err = time.ParseDuration(opts.defaultValue) - } else { - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - } - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. 
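The Int64 branch above gives time.Duration fields two spellings for a default: with a unit suffix the value goes through time.ParseDuration, without one it is read as a nanosecond count (plain TOML string values are parsed the same way further down in valueFromToml). A small sketch with hypothetical field names:

package main

import (
	"fmt"
	"time"

	toml "github.com/pelletier/go-toml"
)

type Timeouts struct {
	// Ends in a non-numeric suffix, so it is parsed with time.ParseDuration.
	Read time.Duration `toml:"read" default:"30s"`
	// Purely numeric, so it is parsed as an int64 nanosecond count.
	Write time.Duration `toml:"write" default:"5000000000"`
}

func main() {
	var t Timeouts
	if err := toml.Unmarshal([]byte(""), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Read, t.Write) // 30s 5s
}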
-func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - mvalPtr := reflect.New(mtype) - - // Check if pointer to value implements the Unmarshaler interface. - if isCustomUnmarshaler(mvalPtr.Type()) { - if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - // Check if pointer to value implements the encoding.TextUnmarshaler. - if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. 
- if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. %s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return 
callTextUnmarshaler(mval, buf.Bytes()) -} - -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - commented: commented, - multiline: multiline, - literal: literal, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. -type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/test/performance/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed72..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = 
[true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git a/test/performance/vendor/github.com/pelletier/go-toml/marshal_test.toml b/test/performance/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110bf0..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/test/performance/vendor/github.com/pelletier/go-toml/parser.go b/test/performance/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index b3726d0dd8..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,507 +0,0 @@ -// TOML Parser. 
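The parser that follows is organized as a state machine: each tomlParserStateFn does one step of work and returns the next state, and run simply loops until nil. A self-contained miniature of that pattern (the library's real state functions are methods that close over the parser, so they take no parameter):

package main

import "fmt"

// stateFn mirrors the tomlParserStateFn idea: do some work, then return
// the next state, or nil when the token stream is exhausted.
type stateFn func(*machine) stateFn

type machine struct {
	tokens []string
	pos    int
}

func parseStart(m *machine) stateFn {
	if m.pos >= len(m.tokens) {
		return nil // end of stream: parsing is finished
	}
	fmt.Println("consume:", m.tokens[m.pos])
	m.pos++
	return parseStart
}

func main() {
	m := &machine{tokens: []string{"[table]", "key", "=", `"value"`}}
	// The same driving loop shape as tomlParser.run.
	for state := parseStart; state != nil; {
		state = state(m)
	}
}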
- -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// raiseError formats an error message based on a token and panics -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
- } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - p.raiseError(key, "cannot redefine an existing inline table or its sub-table: %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) - - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "cannot add a key or sub-table to an existing inline table or its sub-table: %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var errInvalidUnderscore = errors.New("invalid use of _ in number") - -func numberContainsInvalidUnderscore(value string) error { - // For large numbers, you may use underscores between digits to enhance - // readability. Each underscore must be surrounded by at least one digit on - // each side.
- - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscore - } - } - hasBefore = isDigit(r) - } - return nil -} - -var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") - -func hexNumberContainsInvalidUnderscore(value string) error { - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscoreHex - } - } - hasBefore = isHexDigit(r) - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - base := 10 - s := cleanedVal - checkInvalidUnderscore := numberContainsInvalidUnderscore - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - checkInvalidUnderscore = hexNumberContainsInvalidUnderscore - base = 16 - case 'o': - base = 8 - case 'b': - base = 2 - default: - panic("invalid base") // the lexer should catch this first - } - s = cleanedVal[2:] - } - - err := checkInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - - var val interface{} - val, err = strconv.ParseInt(s, base, 64) - if err == nil { - return val - } - - if s[0] != '-' { - if val, err = strconv.ParseUint(s, base, 64); err == nil { - return val - } - } - p.raiseError(tok, "%s", err) - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalTime: - val, err := ParseLocalTime(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - // a local date may be followed by: - // * nothing: this is a local date - // * a local time: this is a local date-time - - next := p.peek() - if next == nil || next.typ != tokenLocalTime { - val, err := ParseLocalDate(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - localDate := tok - localTime := p.getToken() - - next = p.peek() - if next == nil || next.typ != tokenTimeOffset { - v := localDate.val + "T" + localTime.val - val, err := ParseLocalDateTime(v) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - offset := p.getToken() - - layout := time.RFC3339Nano - v := localDate.val + "T" + localTime.val + offset.val - val, err := time.ParseInLocation(layout, v, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - default: - panic(fmt.Errorf("unhandled token: %v", tok)) - } - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && 
t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. 
- if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/token.go b/test/performance/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index b437fdd3b6..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenLocalDate - tokenLocalTime - tokenTimeOffset - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalTime", - "TimeOffset", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys start with the first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/toml.go b/test/performance/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 5541b941f8..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,533 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - literal bool - position Position -} - -// Tree is the result of the parsing of a TOML file. 
-type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
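Tree is also usable directly as a document model; a brief sketch of the query methods above, using Load (defined later in this file) and made-up TOML content:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`
[server]
host = "localhost"
ports = [8001, 8002]
`)
	if err != nil {
		panic(err)
	}

	fmt.Println(tree.Has("server.host"))          // true
	fmt.Println(tree.Get("server.host").(string)) // localhost

	// GetArray returns a typed slice for homogeneous arrays.
	ports := tree.GetArray("server.ports").([]int64)
	fmt.Println(ports) // [8001 8002]
}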
-func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree position is set. -func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
-func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool - Literal bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - v.literal = opts.Literal - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - literal: opts.Literal, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. 
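And the write side: SetWithOptions attaches the same comment metadata that the marshaler's struct tags produce, creating intermediate tables on demand. A sketch with made-up keys:

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.TreeFromMap(map[string]interface{}{})
	if err != nil {
		panic(err)
	}

	// The intermediate "server" table is created automatically.
	tree.SetWithOptions("server.host", toml.SetOptions{
		Comment: "address clients connect to",
	}, "localhost")
	tree.Set("server.port", int64(8080))

	out, err := tree.Marshal() // Tree.Marshal, defined earlier in this file
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}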
-func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. 
-func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = fmt.Errorf("%s", r) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/tomlpub.go b/test/performance/vendor/github.com/pelletier/go-toml/tomlpub.go deleted file mode 100644 index 4136b46254..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/tomlpub.go +++ /dev/null @@ -1,71 +0,0 @@ -package toml - -// PubTOMLValue wrapping tomlValue in order to access all properties from outside. -type PubTOMLValue = tomlValue - -func (ptv *PubTOMLValue) Value() interface{} { - return ptv.value -} -func (ptv *PubTOMLValue) Comment() string { - return ptv.comment -} -func (ptv *PubTOMLValue) Commented() bool { - return ptv.commented -} -func (ptv *PubTOMLValue) Multiline() bool { - return ptv.multiline -} -func (ptv *PubTOMLValue) Position() Position { - return ptv.position -} - -func (ptv *PubTOMLValue) SetValue(v interface{}) { - ptv.value = v -} -func (ptv *PubTOMLValue) SetComment(s string) { - ptv.comment = s -} -func (ptv *PubTOMLValue) SetCommented(c bool) { - ptv.commented = c -} -func (ptv *PubTOMLValue) SetMultiline(m bool) { - ptv.multiline = m -} -func (ptv *PubTOMLValue) SetPosition(p Position) { - ptv.position = p -} - -// PubTree wrapping Tree in order to access all properties from outside. 
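A usage sketch for the loaders above: LoadBytes strips any UTF-8/16/32 byte-order mark before lexing, so a BOM-prefixed document parses the same as a clean one.

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

func main() {
	// A UTF-8 BOM followed by a TOML document, as some editors save it.
	data := append([]byte{0xEF, 0xBB, 0xBF}, []byte(`answer = 42`)...)

	tree, err := toml.LoadBytes(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Get("answer")) // 42

	// Load and LoadFile are thin wrappers over the same code path.
	if _, err := toml.Load(`answer = 42`); err != nil {
		panic(err)
	}
}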
-type PubTree = Tree - -func (pt *PubTree) Values() map[string]interface{} { - return pt.values -} - -func (pt *PubTree) Comment() string { - return pt.comment -} - -func (pt *PubTree) Commented() bool { - return pt.commented -} - -func (pt *PubTree) Inline() bool { - return pt.inline -} - -func (pt *PubTree) SetValues(v map[string]interface{}) { - pt.values = v -} - -func (pt *PubTree) SetComment(c string) { - pt.comment = c -} - -func (pt *PubTree) SetCommented(c bool) { - pt.commented = c -} - -func (pt *PubTree) SetInline(i bool) { - pt.inline = i -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/tomltree_create.go b/test/performance/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500a0..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. -// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. 
- - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/tomltree_write.go b/test/performance/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index c9afbdab76..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,552 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. 
-func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - if tv.literal { - b := strings.Builder{} - b.WriteString("'''\n") - b.Write([]byte(value)) - b.WriteString("\n'''") - return b.String(), nil - } else { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // Prevent returning 0 for empty trees - line = int(^uint(0) >> 1) - // get lowest line number >= 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - 
vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# " - } - repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - if !compactComments { - writtenBytesCountComment, errc := writeStrings(w, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = tomlValueToGo(node.value) - } - } - return result -} - -func tomlValueToGo(v interface{}) interface{} { - if tree, ok := v.(*Tree); ok { - return tree.ToMap() - } - - rv := reflect.ValueOf(v) - - if rv.Kind() != reflect.Slice { - return v - } - values := make([]interface{}, rv.Len()) - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - values[i] = tomlValueToGo(item) - } - return values -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/test/performance/vendor/github.com/pelletier/go-toml/tomltree_writepub.go deleted file mode 100644 index fa326308cf..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/tomltree_writepub.go +++ /dev/null @@ -1,6 +0,0 @@ -package toml - -// ValueStringRepresentation transforms an interface{} value into its toml string representation. -func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/README.md b/test/performance/vendor/github.com/pelletier/go-toml/v2/README.md index a63c3a7967..9f8439cc7d 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/README.md @@ -140,6 +140,17 @@ fmt.Println(string(b)) [marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal +## Unstable API + +This API does not yet follow the backward compatibility guarantees of this +library. It provides early access to features that may have rough edges or an +API subject to change. + +### Parser + +Parser is the unstable API that allows iterative parsing of a TOML document at +the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
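+
+A minimal sketch of walking a document's top-level expressions with the
+parser (`Reset`, `Expression`, `Kind`, `KeyValue`, and `Value` are used
+elsewhere in this change; `NextExpression` and `Error` are assumed from the
+unstable package's documented surface):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/pelletier/go-toml/v2/unstable"
+)
+
+func main() {
+	p := unstable.Parser{}
+	p.Reset([]byte("key = 'value'"))
+
+	// Pull one top-level expression at a time until the document is exhausted.
+	for p.NextExpression() {
+		e := p.Expression()
+		if e.Kind == unstable.KeyValue {
+			// Data holds the raw bytes backing the value node.
+			fmt.Printf("value: %s\n", e.Value().Data)
+		}
+	}
+	// Any parse error is surfaced after iteration stops.
+	if err := p.Error(); err != nil {
+		panic(err)
+	}
+}
+```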
+ ## Benchmarks Execution time speedup compared to other Go TOML libraries: diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/decode.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/decode.go index 4af965360a..3a860d0f6a 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/decode.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -5,6 +5,8 @@ import ( "math" "strconv" "time" + + "github.com/pelletier/go-toml/v2/unstable" ) func parseInteger(b []byte) (int64, error) { @@ -32,7 +34,7 @@ func parseLocalDate(b []byte) (LocalDate, error) { var date LocalDate if len(b) != 10 || b[4] != '-' || b[7] != '-' { - return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD") + return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") } var err error @@ -53,7 +55,7 @@ func parseLocalDate(b []byte) (LocalDate, error) { } if !isValidDate(date.Year, date.Month, date.Day) { - return LocalDate{}, newDecodeError(b, "impossible date") + return LocalDate{}, unstable.NewParserError(b, "impossible date") } return date, nil @@ -64,7 +66,7 @@ func parseDecimalDigits(b []byte) (int, error) { for i, c := range b { if c < '0' || c > '9' { - return 0, newDecodeError(b[i:i+1], "expected digit (0-9)") + return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") } v *= 10 v += int(c - '0') @@ -97,7 +99,7 @@ func parseDateTime(b []byte) (time.Time, error) { } else { const dateTimeByteLen = 6 if len(b) != dateTimeByteLen { - return time.Time{}, newDecodeError(b, "invalid date-time timezone") + return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone") } var direction int switch b[0] { @@ -106,11 +108,11 @@ func parseDateTime(b []byte) (time.Time, error) { case '+': direction = +1 default: - return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character") + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character") } if b[3] != ':' { - return time.Time{}, newDecodeError(b[3:4], "expected a : separator") + return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator") } hours, err := parseDecimalDigits(b[1:3]) @@ -118,7 +120,7 @@ func parseDateTime(b []byte) (time.Time, error) { return time.Time{}, err } if hours > 23 { - return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours") + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours") } minutes, err := parseDecimalDigits(b[4:6]) @@ -126,7 +128,7 @@ func parseDateTime(b []byte) (time.Time, error) { return time.Time{}, err } if minutes > 59 { - return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes") + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes") } seconds := direction * (hours*3600 + minutes*60) @@ -139,7 +141,7 @@ func parseDateTime(b []byte) (time.Time, error) { } if len(b) > 0 { - return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone") + return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone") } t := time.Date( @@ -160,7 +162,7 @@ func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { const localDateTimeByteMinLen = 11 if len(b) < localDateTimeByteMinLen { - return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") + return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format 
YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") } date, err := parseLocalDate(b[:10]) @@ -171,7 +173,7 @@ func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { sep := b[10] if sep != 'T' && sep != ' ' && sep != 't' { - return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space") + return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space") } t, rest, err := parseLocalTime(b[11:]) @@ -195,7 +197,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { // check if b matches to have expected format HH:MM:SS[.NNNNNN] const localTimeByteLen = 8 if len(b) < localTimeByteLen { - return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") + return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") } var err error @@ -206,10 +208,10 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { } if t.Hour > 23 { - return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23") + return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23") } if b[2] != ':' { - return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes") + return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes") } t.Minute, err = parseDecimalDigits(b[3:5]) @@ -217,10 +219,10 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { return t, nil, err } if t.Minute > 59 { - return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59") + return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59") } if b[5] != ':' { - return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds") + return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds") } t.Second, err = parseDecimalDigits(b[6:8]) @@ -229,7 +231,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { } if t.Second > 60 { - return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60") + return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") } b = b[8:] @@ -242,7 +244,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { for i, c := range b[1:] { if !isDigit(c) { if i == 0 { - return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point") + return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") } break } @@ -266,7 +268,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { } if precision == 0 { - return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit") + return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") } t.Nanosecond = frac * nspow[precision] @@ -289,24 +291,24 @@ func parseFloat(b []byte) (float64, error) { } if cleaned[0] == '.' { - return 0, newDecodeError(b, "float cannot start with a dot") + return 0, unstable.NewParserError(b, "float cannot start with a dot") } if cleaned[len(cleaned)-1] == '.' { - return 0, newDecodeError(b, "float cannot end with a dot") + return 0, unstable.NewParserError(b, "float cannot end with a dot") } dotAlreadySeen := false for i, c := range cleaned { if c == '.' 
{ if dotAlreadySeen { - return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point") + return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") } if !isDigit(cleaned[i-1]) { - return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit") + return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") } if !isDigit(cleaned[i+1]) { - return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit") + return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") } dotAlreadySeen = true } @@ -317,12 +319,12 @@ func parseFloat(b []byte) (float64, error) { start = 1 } if cleaned[start] == '0' && isDigit(cleaned[start+1]) { - return 0, newDecodeError(b, "float integer part cannot have leading zeroes") + return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") } f, err := strconv.ParseFloat(string(cleaned), 64) if err != nil { - return 0, newDecodeError(b, "unable to parse float: %w", err) + return 0, unstable.NewParserError(b, "unable to parse float: %w", err) } return f, nil @@ -336,7 +338,7 @@ func parseIntHex(b []byte) (int64, error) { i, err := strconv.ParseInt(string(cleaned), 16, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) } return i, nil @@ -350,7 +352,7 @@ func parseIntOct(b []byte) (int64, error) { i, err := strconv.ParseInt(string(cleaned), 8, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse octal number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) } return i, nil @@ -364,7 +366,7 @@ func parseIntBin(b []byte) (int64, error) { i, err := strconv.ParseInt(string(cleaned), 2, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse binary number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) } return i, nil @@ -387,12 +389,12 @@ func parseIntDec(b []byte) (int64, error) { } if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' { - return 0, newDecodeError(b, "leading zero not allowed on decimal number") + return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") } i, err := strconv.ParseInt(string(cleaned), 10, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse decimal number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) } return i, nil @@ -409,11 +411,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { } if b[start] == '_' { - return nil, newDecodeError(b[start:start+1], "number cannot start with underscore") + return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") } if b[len(b)-1] == '_' { - return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") } // fast path @@ -435,7 +437,7 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { c := b[i] if c == '_' { if !before { - return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") } before = false } else { @@ -449,11 +451,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) 
{ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { if b[0] == '_' { - return nil, newDecodeError(b[0:1], "number cannot start with underscore") + return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") } if b[len(b)-1] == '_' { - return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") } // fast path @@ -476,10 +478,10 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { switch c { case '_': if !before { - return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") } if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { - return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent") + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") } before = false case '+', '-': @@ -488,15 +490,15 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { before = false case 'e', 'E': if i < len(b)-1 && b[i+1] == '_' { - return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent") + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") } cleaned = append(cleaned, c) case '.': if i < len(b)-1 && b[i+1] == '_' { - return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point") + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") } if i > 0 && b[i-1] == '_' { - return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point") + return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") } cleaned = append(cleaned, c) default: @@ -542,3 +544,7 @@ func daysIn(m int, year int) int { func isLeap(year int) bool { return year%4 == 0 && (year%100 != 0 || year%400 == 0) } + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/errors.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/errors.go index 2e7f0ffdf8..309733f1f9 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/errors.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/unstable" ) // DecodeError represents an error encountered during the parsing or decoding @@ -55,25 +56,6 @@ func (s *StrictMissingError) String() string { type Key []string -// internal version of DecodeError that is used as the base to create a -// DecodeError with full context. -type decodeError struct { - highlight []byte - message string - key Key // optional -} - -func (de *decodeError) Error() string { - return de.message -} - -func newDecodeError(highlight []byte, format string, args ...interface{}) error { - return &decodeError{ - highlight: highlight, - message: fmt.Errorf(format, args...).Error(), - } -} - // Error returns the error message contained in the DecodeError. func (e *DecodeError) Error() string { return "toml: " + e.message @@ -105,12 +87,12 @@ func (e *DecodeError) Key() Key { // highlight can be freely deallocated. 
// //nolint:funlen -func wrapDecodeError(document []byte, de *decodeError) *DecodeError { - offset := danger.SubsliceOffset(document, de.highlight) +func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { + offset := danger.SubsliceOffset(document, de.Highlight) errMessage := de.Error() errLine, errColumn := positionAtEnd(document[:offset]) - before, after := linesOfContext(document, de.highlight, offset, 3) + before, after := linesOfContext(document, de.Highlight, offset, 3) var buf strings.Builder @@ -140,7 +122,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError { buf.Write(before[0]) } - buf.Write(de.highlight) + buf.Write(de.Highlight) if len(after) > 0 { buf.Write(after[0]) @@ -158,7 +140,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError { buf.WriteString(strings.Repeat(" ", len(before[0]))) } - buf.WriteString(strings.Repeat("~", len(de.highlight))) + buf.WriteString(strings.Repeat("~", len(de.Highlight))) if len(errMessage) > 0 { buf.WriteString(" ") @@ -183,7 +165,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError { message: errMessage, line: errLine, column: errColumn, - key: de.key, + key: de.Key, human: buf.String(), } } diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go deleted file mode 100644 index 120f16e5ce..0000000000 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go +++ /dev/null @@ -1,51 +0,0 @@ -package ast - -type Reference int - -const InvalidReference Reference = -1 - -func (r Reference) Valid() bool { - return r != InvalidReference -} - -type Builder struct { - tree Root - lastIdx int -} - -func (b *Builder) Tree() *Root { - return &b.tree -} - -func (b *Builder) NodeAt(ref Reference) *Node { - return b.tree.at(ref) -} - -func (b *Builder) Reset() { - b.tree.nodes = b.tree.nodes[:0] - b.lastIdx = 0 -} - -func (b *Builder) Push(n Node) Reference { - b.lastIdx = len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - return Reference(b.lastIdx) -} - -func (b *Builder) PushAndChain(n Node) Reference { - newIdx := len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - if b.lastIdx >= 0 { - b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx - } - b.lastIdx = newIdx - return Reference(b.lastIdx) -} - -func (b *Builder) AttachChild(parent Reference, child Reference) { - b.tree.nodes[parent].child = int(child) - int(parent) -} - -func (b *Builder) Chain(from Reference, to Reference) { - b.tree.nodes[from].next = int(to) - int(from) -} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go new file mode 100644 index 0000000000..80f698db4b --- /dev/null +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go @@ -0,0 +1,42 @@ +package characters + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII 
characters + 0x7F: true, +} + +func InvalidAscii(b byte) bool { + return invalidAsciiTable[b] +} diff --git a/vendor/github.com/pelletier/go-toml/v2/utf8.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go similarity index 87% rename from vendor/github.com/pelletier/go-toml/v2/utf8.go rename to test/performance/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go index d47a4f20c5..db4f45acbf 100644 --- a/vendor/github.com/pelletier/go-toml/v2/utf8.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go @@ -1,4 +1,4 @@ -package toml +package characters import ( "unicode/utf8" @@ -32,7 +32,7 @@ func (u utf8Err) Zero() bool { // 0x9 => tab, ok // 0xA - 0x1F => invalid // 0x7F => invalid -func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { +func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. offset := 0 for len(p) >= 8 { @@ -48,7 +48,7 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { } for i, b := range p[:8] { - if invalidAscii(b) { + if InvalidAscii(b) { err.Index = offset + i err.Size = 1 return @@ -62,7 +62,7 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { for i := 0; i < n; { pi := p[i] if pi < utf8.RuneSelf { - if invalidAscii(pi) { + if InvalidAscii(pi) { err.Index = offset + i err.Size = 1 return @@ -106,11 +106,11 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { } // Return the size of the next rune if valid, 0 otherwise. -func utf8ValidNext(p []byte) int { +func Utf8ValidNext(p []byte) int { c := p[0] if c < utf8.RuneSelf { - if invalidAscii(c) { + if InvalidAscii(c) { return 0 } return 1 @@ -140,47 +140,6 @@ func utf8ValidNext(p []byte) int { return size } -var invalidAsciiTable = [256]bool{ - 0x00: true, - 0x01: true, - 0x02: true, - 0x03: true, - 0x04: true, - 0x05: true, - 0x06: true, - 0x07: true, - 0x08: true, - // 0x09 TAB - // 0x0A LF - 0x0B: true, - 0x0C: true, - // 0x0D CR - 0x0E: true, - 0x0F: true, - 0x10: true, - 0x11: true, - 0x12: true, - 0x13: true, - 0x14: true, - 0x15: true, - 0x16: true, - 0x17: true, - 0x18: true, - 0x19: true, - 0x1A: true, - 0x1B: true, - 0x1C: true, - 0x1D: true, - 0x1E: true, - 0x1F: true, - // 0x20 - 0x7E Printable ASCII characters - 0x7F: true, -} - -func invalidAscii(b byte) bool { - return invalidAsciiTable[b] -} - // acceptRange gives the range of valid values for the second byte in a UTF-8 // sequence. type acceptRange struct { diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go index 7c148f48d4..149b17f538 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -1,8 +1,6 @@ package tracker -import ( - "github.com/pelletier/go-toml/v2/internal/ast" -) +import "github.com/pelletier/go-toml/v2/unstable" // KeyTracker is a tracker that keeps track of the current Key as the AST is // walked. @@ -11,19 +9,19 @@ type KeyTracker struct { } // UpdateTable sets the state of the tracker with the AST table node. -func (t *KeyTracker) UpdateTable(node *ast.Node) { +func (t *KeyTracker) UpdateTable(node *unstable.Node) { t.reset() t.Push(node) } // UpdateArrayTable sets the state of the tracker with the AST array table node. 
-func (t *KeyTracker) UpdateArrayTable(node *ast.Node) { +func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { t.reset() t.Push(node) } // Push the given key on the stack. -func (t *KeyTracker) Push(node *ast.Node) { +func (t *KeyTracker) Push(node *unstable.Node) { it := node.Key() for it.Next() { t.k = append(t.k, string(it.Node().Data)) @@ -31,7 +29,7 @@ func (t *KeyTracker) Push(node *ast.Node) { } // Pop key from stack. -func (t *KeyTracker) Pop(node *ast.Node) { +func (t *KeyTracker) Pop(node *unstable.Node) { it := node.Key() for it.Next() { t.k = t.k[:len(t.k)-1] diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index a7ee05ba65..40e23f8304 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/unstable" ) type keyKind uint8 @@ -150,23 +150,23 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) { // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are // consistent. -func (s *SeenTracker) CheckExpression(node *ast.Node) error { +func (s *SeenTracker) CheckExpression(node *unstable.Node) error { if s.entries == nil { s.reset() } switch node.Kind { - case ast.KeyValue: + case unstable.KeyValue: return s.checkKeyValue(node) - case ast.Table: + case unstable.Table: return s.checkTable(node) - case ast.ArrayTable: + case unstable.ArrayTable: return s.checkArrayTable(node) default: panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) } } -func (s *SeenTracker) checkTable(node *ast.Node) error { +func (s *SeenTracker) checkTable(node *unstable.Node) error { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -219,7 +219,7 @@ func (s *SeenTracker) checkTable(node *ast.Node) error { return nil } -func (s *SeenTracker) checkArrayTable(node *ast.Node) error { +func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -267,7 +267,7 @@ func (s *SeenTracker) checkArrayTable(node *ast.Node) error { return nil } -func (s *SeenTracker) checkKeyValue(node *ast.Node) error { +func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { parentIdx := s.currentIdx it := node.Key() @@ -297,26 +297,26 @@ func (s *SeenTracker) checkKeyValue(node *ast.Node) error { value := node.Value() switch value.Kind { - case ast.InlineTable: + case unstable.InlineTable: return s.checkInlineTable(value) - case ast.Array: + case unstable.Array: return s.checkArray(value) } return nil } -func (s *SeenTracker) checkArray(node *ast.Node) error { +func (s *SeenTracker) checkArray(node *unstable.Node) error { it := node.Children() for it.Next() { n := it.Node() switch n.Kind { - case ast.InlineTable: + case unstable.InlineTable: err := s.checkInlineTable(n) if err != nil { return err } - case ast.Array: + case unstable.Array: err := s.checkArray(n) if err != nil { return err @@ -326,7 +326,7 @@ func (s *SeenTracker) checkArray(node *ast.Node) error { return nil } -func (s *SeenTracker) checkInlineTable(node *ast.Node) error { +func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { if pool.New == nil { pool.New = func() interface{} { 
return &SeenTracker{} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/localtime.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/localtime.go index 30a31dcbde..a856bfdb0d 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/localtime.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -4,6 +4,8 @@ import ( "fmt" "strings" "time" + + "github.com/pelletier/go-toml/v2/unstable" ) // LocalDate represents a calendar day in no specific timezone. @@ -75,7 +77,7 @@ func (d LocalTime) MarshalText() ([]byte, error) { func (d *LocalTime) UnmarshalText(b []byte) error { res, left, err := parseLocalTime(b) if err == nil && len(left) != 0 { - err = newDecodeError(left, "extra characters") + err = unstable.NewParserError(left, "extra characters") } if err != nil { return err @@ -109,7 +111,7 @@ func (d LocalDateTime) MarshalText() ([]byte, error) { func (d *LocalDateTime) UnmarshalText(data []byte) error { res, left, err := parseLocalDateTime(data) if err == nil && len(left) != 0 { - err = newDecodeError(left, "extra characters") + err = unstable.NewParserError(left, "extra characters") } if err != nil { return err diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/marshaler.go index acb288315b..07aceb9024 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -12,6 +12,8 @@ import ( "strings" "time" "unicode" + + "github.com/pelletier/go-toml/v2/internal/characters" ) // Marshal serializes a Go value as a TOML document. @@ -437,7 +439,7 @@ func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byt func needsQuoting(v string) bool { // TODO: vectorize for _, b := range []byte(v) { - if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) { + if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { return true } } diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/strict.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/strict.go index b7830d139c..802e7e4d15 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/strict.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -1,9 +1,9 @@ package toml import ( - "github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" ) type strict struct { @@ -12,10 +12,10 @@ type strict struct { // Tracks the current key being processed. 
key tracker.KeyTracker - missing []decodeError + missing []unstable.ParserError } -func (s *strict) EnterTable(node *ast.Node) { +func (s *strict) EnterTable(node *unstable.Node) { if !s.Enabled { return } @@ -23,7 +23,7 @@ func (s *strict) EnterTable(node *ast.Node) { s.key.UpdateTable(node) } -func (s *strict) EnterArrayTable(node *ast.Node) { +func (s *strict) EnterArrayTable(node *unstable.Node) { if !s.Enabled { return } @@ -31,7 +31,7 @@ func (s *strict) EnterArrayTable(node *ast.Node) { s.key.UpdateArrayTable(node) } -func (s *strict) EnterKeyValue(node *ast.Node) { +func (s *strict) EnterKeyValue(node *unstable.Node) { if !s.Enabled { return } @@ -39,7 +39,7 @@ func (s *strict) EnterKeyValue(node *ast.Node) { s.key.Push(node) } -func (s *strict) ExitKeyValue(node *ast.Node) { +func (s *strict) ExitKeyValue(node *unstable.Node) { if !s.Enabled { return } @@ -47,27 +47,27 @@ func (s *strict) ExitKeyValue(node *ast.Node) { s.key.Pop(node) } -func (s *strict) MissingTable(node *ast.Node) { +func (s *strict) MissingTable(node *unstable.Node) { if !s.Enabled { return } - s.missing = append(s.missing, decodeError{ - highlight: keyLocation(node), - message: "missing table", - key: s.key.Key(), + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing table", + Key: s.key.Key(), }) } -func (s *strict) MissingField(node *ast.Node) { +func (s *strict) MissingField(node *unstable.Node) { if !s.Enabled { return } - s.missing = append(s.missing, decodeError{ - highlight: keyLocation(node), - message: "missing field", - key: s.key.Key(), + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing field", + Key: s.key.Key(), }) } @@ -88,7 +88,7 @@ func (s *strict) Error(doc []byte) error { return err } -func keyLocation(node *ast.Node) []byte { +func keyLocation(node *unstable.Node) []byte { k := node.Key() hasOne := k.Next() diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/types.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/types.go index 630a454663..3c6b8fe570 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/types.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/types.go @@ -6,9 +6,9 @@ import ( "time" ) -var timeType = reflect.TypeOf(time.Time{}) -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) -var sliceInterfaceType = reflect.TypeOf([]interface{}{}) +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) +var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) var stringType = reflect.TypeOf("") diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index d0d7a72d08..70f6ec5720 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -12,16 +12,16 @@ import ( "sync/atomic" "time" - "github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/danger" 
"github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" ) // Unmarshal deserializes a TOML document into a Go value. // // It is a shortcut for Decoder.Decode() with the default options. func Unmarshal(data []byte, v interface{}) error { - p := parser{} + p := unstable.Parser{} p.Reset(data) d := decoder{p: &p} @@ -101,7 +101,7 @@ func (d *Decoder) Decode(v interface{}) error { return fmt.Errorf("toml: %w", err) } - p := parser{} + p := unstable.Parser{} p.Reset(b) dec := decoder{ p: &p, @@ -115,7 +115,7 @@ func (d *Decoder) Decode(v interface{}) error { type decoder struct { // Which parser instance in use for this decoding session. - p *parser + p *unstable.Parser // Flag indicating that the current expression is stashed. // If set to true, calling nextExpr will not actually pull a new expression @@ -157,7 +157,7 @@ func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target) } -func (d *decoder) expr() *ast.Node { +func (d *decoder) expr() *unstable.Node { return d.p.Expression() } @@ -208,12 +208,12 @@ func (d *decoder) FromParser(v interface{}) error { err := d.fromParser(r) if err == nil { - return d.strict.Error(d.p.data) + return d.strict.Error(d.p.Data()) } - var e *decodeError + var e *unstable.ParserError if errors.As(err, &e) { - return wrapDecodeError(d.p.data, e) + return wrapDecodeError(d.p.Data(), e) } return err @@ -234,16 +234,16 @@ func (d *decoder) fromParser(root reflect.Value) error { Rules for the unmarshal code: - The stack is used to keep track of which values need to be set where. -- handle* functions <=> switch on a given ast.Kind. +- handle* functions <=> switch on a given unstable.Kind. - unmarshalX* functions need to unmarshal a node of kind X. - An "object" is either a struct or a map. 
*/ -func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { +func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { var x reflect.Value var err error - if !(d.skipUntilTable && expr.Kind == ast.KeyValue) { + if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { err = d.seen.CheckExpression(expr) if err != nil { return err @@ -251,16 +251,16 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { } switch expr.Kind { - case ast.KeyValue: + case unstable.KeyValue: if d.skipUntilTable { return nil } x, err = d.handleKeyValue(expr, v) - case ast.Table: + case unstable.Table: d.skipUntilTable = false d.strict.EnterTable(expr) x, err = d.handleTable(expr.Key(), v) - case ast.ArrayTable: + case unstable.ArrayTable: d.skipUntilTable = false d.strict.EnterArrayTable(expr) x, err = d.handleArrayTable(expr.Key(), v) @@ -269,7 +269,7 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { } if d.skipUntilTable { - if expr.Kind == ast.Table || expr.Kind == ast.ArrayTable { + if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable { d.strict.MissingTable(expr) } } else if err == nil && x.IsValid() { @@ -279,14 +279,14 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { return err } -func (d *decoder) handleArrayTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if key.Next() { return d.handleArrayTablePart(key, v) } return d.handleKeyValues(v) } -func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { switch v.Kind() { case reflect.Interface: elem := v.Elem() @@ -339,13 +339,13 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val case reflect.Array: idx := d.arrayIndex(true, v) if idx >= v.Len() { - return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) } elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) return v, err default: - return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type()) + return reflect.Value{}, d.typeMismatchError("array table", v.Type()) } } @@ -353,7 +353,7 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val // evaluated like a normal key, but if it returns a collection, it also needs to // point to the last element of the collection. Unless it is the last part of // the key, then it needs to create a new element at the end. 
-func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if key.IsLast() { return d.handleArrayTableCollectionLast(key, v) } @@ -390,7 +390,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) case reflect.Array: idx := d.arrayIndex(false, v) if idx >= v.Len() { - return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) } elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) @@ -400,7 +400,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) return d.handleArrayTable(key, v) } -func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { +func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { var rv reflect.Value // First, dispatch over v to make sure it is a valid object. @@ -518,7 +518,7 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle // HandleArrayTablePart navigates the Go structure v using the key v. It is // only used for the prefix (non-last) parts of an array-table. When // encountering a collection, it should go to the last element. -func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { var makeFn valueMakerFn if key.IsLast() { makeFn = makeSliceInterface @@ -530,10 +530,10 @@ func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (refle // HandleTable returns a reference when it has checked the next expression but // cannot handle it. -func (d *decoder) handleTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if v.Kind() == reflect.Slice { if v.Len() == 0 { - return reflect.Value{}, newDecodeError(key.Node().Data, "cannot store a table in a slice") + return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") } elem := v.Index(v.Len() - 1) x, err := d.handleTable(key, elem) @@ -560,7 +560,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { var rv reflect.Value for d.nextExpr() { expr := d.expr() - if expr.Kind != ast.KeyValue { + if expr.Kind != unstable.KeyValue { // Stash the expression so that fromParser can just loop and use // the right handler. 
// We could just recurse ourselves here, but at least this gives a @@ -587,7 +587,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { } type ( - handlerFn func(key ast.Iterator, v reflect.Value) (reflect.Value, error) + handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) valueMakerFn func() reflect.Value ) @@ -599,11 +599,11 @@ func makeSliceInterface() reflect.Value { return reflect.MakeSlice(sliceInterfaceType, 0, 16) } -func (d *decoder) handleTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) } -func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, error) { +func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) { // Special case for time, because we allow to unmarshal to it from // different kind of AST nodes. if v.Type() == timeType { @@ -613,7 +613,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) if err != nil { - return false, newDecodeError(d.p.Raw(node.Raw), "%w", err) + return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err) } return true, nil @@ -622,7 +622,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err return false, nil } -func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error { +func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { for v.Kind() == reflect.Ptr { v = initAndDereferencePointer(v) } @@ -633,32 +633,32 @@ func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error { } switch value.Kind { - case ast.String: + case unstable.String: return d.unmarshalString(value, v) - case ast.Integer: + case unstable.Integer: return d.unmarshalInteger(value, v) - case ast.Float: + case unstable.Float: return d.unmarshalFloat(value, v) - case ast.Bool: + case unstable.Bool: return d.unmarshalBool(value, v) - case ast.DateTime: + case unstable.DateTime: return d.unmarshalDateTime(value, v) - case ast.LocalDate: + case unstable.LocalDate: return d.unmarshalLocalDate(value, v) - case ast.LocalTime: + case unstable.LocalTime: return d.unmarshalLocalTime(value, v) - case ast.LocalDateTime: + case unstable.LocalDateTime: return d.unmarshalLocalDateTime(value, v) - case ast.InlineTable: + case unstable.InlineTable: return d.unmarshalInlineTable(value, v) - case ast.Array: + case unstable.Array: return d.unmarshalArray(value, v) default: panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) } } -func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error { switch v.Kind() { case reflect.Slice: if v.IsNil() { @@ -729,7 +729,7 @@ func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { // Make sure v is an initialized object. 
switch v.Kind() { case reflect.Map: @@ -746,7 +746,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error } return d.unmarshalInlineTable(itable, elem) default: - return newDecodeError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) + return unstable.NewParserError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) } it := itable.Children() @@ -765,7 +765,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error return nil } -func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { dt, err := parseDateTime(value.Data) if err != nil { return err @@ -775,7 +775,7 @@ func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { ld, err := parseLocalDate(value.Data) if err != nil { return err @@ -792,28 +792,28 @@ func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalLocalTime(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { lt, rest, err := parseLocalTime(value.Data) if err != nil { return err } if len(rest) > 0 { - return newDecodeError(rest, "extra characters at the end of a local time") + return unstable.NewParserError(rest, "extra characters at the end of a local time") } v.Set(reflect.ValueOf(lt)) return nil } -func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { ldt, rest, err := parseLocalDateTime(value.Data) if err != nil { return err } if len(rest) > 0 { - return newDecodeError(rest, "extra characters at the end of a local date time") + return unstable.NewParserError(rest, "extra characters at the end of a local date time") } if v.Type() == timeType { @@ -828,7 +828,7 @@ func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error return nil } -func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { b := value.Data[0] == 't' switch v.Kind() { @@ -837,13 +837,13 @@ func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error { case reflect.Interface: v.Set(reflect.ValueOf(b)) default: - return newDecodeError(value.Data, "cannot assign boolean to a %t", b) + return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b) } return nil } -func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { f, err := parseFloat(value.Data) if err != nil { return err @@ -854,13 +854,13 @@ func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error { v.SetFloat(f) case reflect.Float32: if f > math.MaxFloat32 { - return newDecodeError(value.Data, "number %f does not fit in a float32", f) + return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) } v.SetFloat(f) case reflect.Interface: v.Set(reflect.ValueOf(f)) default: - return newDecodeError(value.Data, "float cannot be assigned to %s", v.Kind()) + return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) 
} return nil @@ -886,7 +886,7 @@ func init() { } } -func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { i, err := parseInteger(value.Data) if err != nil { return err @@ -967,20 +967,20 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalString(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { switch v.Kind() { case reflect.String: v.SetString(string(value.Data)) case reflect.Interface: v.Set(reflect.ValueOf(string(value.Data))) default: - return newDecodeError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) + return unstable.NewParserError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) } return nil } -func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { d.strict.EnterKeyValue(expr) v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) @@ -994,7 +994,7 @@ func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value return v, err } -func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { if key.Next() { // Still scoping the key return d.handleKeyValuePart(key, value, v) @@ -1004,7 +1004,7 @@ func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v refle return reflect.Value{}, d.handleValue(value, v) } -func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { // contains the replacement for v var rv reflect.Value diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go similarity index 60% rename from vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go rename to test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go index 9dec2e0007..b60d9bfd6d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go @@ -1,4 +1,4 @@ -package ast +package unstable import ( "fmt" @@ -7,13 +7,16 @@ import ( "github.com/pelletier/go-toml/v2/internal/danger" ) -// Iterator starts uninitialized, you need to call Next() first. +// Iterator over a sequence of nodes. +// +// Starts uninitialized, you need to call Next() first. // // For example: // // it := n.Children() // for it.Next() { -// it.Node() +// n := it.Node() +// // do something with n // } type Iterator struct { started bool @@ -32,42 +35,31 @@ func (c *Iterator) Next() bool { } // IsLast returns true if the current node of the iterator is the last -// one. Subsequent call to Next() will return false. +// one. Subsequent calls to Next() will return false. func (c *Iterator) IsLast() bool { return c.node.next == 0 } -// Node returns a copy of the node pointed at by the iterator. +// Node returns a pointer to the node pointed at by the iterator. func (c *Iterator) Node() *Node { return c.node } -// Root contains a full AST. 
+// Node in a TOML expression AST. // -// It is immutable once constructed with Builder. -type Root struct { - nodes []Node -} - -// Iterator over the top level nodes. -func (r *Root) Iterator() Iterator { - it := Iterator{} - if len(r.nodes) > 0 { - it.node = &r.nodes[0] - } - return it -} - -func (r *Root) at(idx Reference) *Node { - return &r.nodes[idx] -} - -// Arrays have one child per element in the array. InlineTables have -// one child per key-value pair in the table. KeyValues have at least -// two children. The first one is the value. The rest make a -// potentially dotted key. Table and Array table have one child per -// element of the key they represent (same as KeyValue, but without -// the last node being the value). +// Depending on Kind, its sequence of children should be interpreted +// differently. +// +// - Array have one child per element in the array. +// - InlineTable have one child per key-value in the table (each of kind +// KeyValue). +// - KeyValue have at least two children. The first one is the value. The rest +// make a potentially dotted key. +// - Table and ArrayTable's children represent a dotted key (same as +// KeyValue, but without the first node being the value). +// +// When relevant, Raw describes the range of bytes this node is referring to in +// the input document. Use Parser.Raw() to retrieve the actual bytes. type Node struct { Kind Kind Raw Range // Raw bytes from the input. @@ -80,13 +72,13 @@ type Node struct { child int // 0 if no child } +// Range of bytes in the document. type Range struct { Offset uint32 Length uint32 } -// Next returns a copy of the next node, or an invalid Node if there -// is no next node. +// Next returns a pointer to the next node, or nil if there is no next node. func (n *Node) Next() *Node { if n.next == 0 { return nil @@ -96,9 +88,9 @@ func (n *Node) Next() *Node { return (*Node)(danger.Stride(ptr, size, n.next)) } -// Child returns a copy of the first child node of this node. Other -// children can be accessed calling Next on the first child. Returns -// an invalid Node if there is none. +// Child returns a pointer to the first child node of this node. Other children +// can be accessed calling Next on the first child. Returns nil if this Node +// has no child. func (n *Node) Child() *Node { if n.child == 0 { return nil @@ -113,9 +105,9 @@ func (n *Node) Valid() bool { return n != nil } -// Key returns the child nodes making the Key on a supported -// node. Panics otherwise. They are guaranteed to be all be of the -// Kind Key. A simple key would return just one element. +// Key returns the children nodes making the Key on a supported node. Panics +// otherwise. They are guaranteed to all be of the Kind Key. A simple key +// would return just one element. func (n *Node) Key() Iterator { switch n.Kind { case KeyValue: diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go new file mode 100644 index 0000000000..9538e30df9 --- /dev/null +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go @@ -0,0 +1,71 @@ +package unstable + +// root contains a full AST. +// +// It is immutable once constructed with Builder. +type root struct { + nodes []Node +} + +// Iterator over the top level nodes.
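The new Node/Iterator surface documented above is enough to walk an expression by hand: for a KeyValue node, `Value()` returns the first child and `Key()` iterates the remaining children, which form the (possibly dotted) key. A small sketch against the vendored `unstable` package; the input document is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2/unstable"
)

func main() {
	var p unstable.Parser
	p.Reset([]byte("fruit.name = 'apple'"))

	for p.NextExpression() {
		e := p.Expression()
		if e.Kind != unstable.KeyValue {
			continue
		}
		// The first child is the value; the remaining children are the dotted key.
		it := e.Key()
		for it.Next() {
			fmt.Printf("key part: %q\n", it.Node().Data) // "fruit", then "name"
		}
		fmt.Printf("value: %q (kind %s)\n", e.Value().Data, e.Value().Kind)
	}
	if err := p.Error(); err != nil {
		panic(err)
	}
}
```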
+func (r *root) Iterator() Iterator { + it := Iterator{} + if len(r.nodes) > 0 { + it.node = &r.nodes[0] + } + return it +} + +func (r *root) at(idx reference) *Node { + return &r.nodes[idx] +} + +type reference int + +const invalidReference reference = -1 + +func (r reference) Valid() bool { + return r != invalidReference +} + +type builder struct { + tree root + lastIdx int +} + +func (b *builder) Tree() *root { + return &b.tree +} + +func (b *builder) NodeAt(ref reference) *Node { + return b.tree.at(ref) +} + +func (b *builder) Reset() { + b.tree.nodes = b.tree.nodes[:0] + b.lastIdx = 0 +} + +func (b *builder) Push(n Node) reference { + b.lastIdx = len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + return reference(b.lastIdx) +} + +func (b *builder) PushAndChain(n Node) reference { + newIdx := len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + if b.lastIdx >= 0 { + b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + } + b.lastIdx = newIdx + return reference(b.lastIdx) +} + +func (b *builder) AttachChild(parent reference, child reference) { + b.tree.nodes[parent].child = int(child) - int(parent) +} + +func (b *builder) Chain(from reference, to reference) { + b.tree.nodes[from].next = int(to) - int(from) +} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go new file mode 100644 index 0000000000..7ff26c53c7 --- /dev/null +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go @@ -0,0 +1,3 @@ +// Package unstable provides APIs that do not meet the backward compatibility +// guarantees yet. +package unstable diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go similarity index 81% rename from test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go rename to test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go index 2b50c67fce..ff9df1bef8 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go @@ -1,25 +1,26 @@ -package ast +package unstable import "fmt" +// Kind represents the type of TOML structure contained in a given Node. type Kind int const ( - // meta + // Meta Invalid Kind = iota Comment Key - // top level structures + // Top level structures Table ArrayTable KeyValue - // containers values + // Container values Array InlineTable - // values + // Values String Bool Float @@ -30,6 +31,7 @@ const ( DateTime ) +// String implements fmt.Stringer.
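The builder above keeps every node of the current expression in one flat slice, and `next`/`child` store relative indices rather than pointers; `Node.Next()` turns such an offset back into a pointer with `danger.Stride`. A toy replica of the linking arithmetic, with invented names and none of the vendored API, just to make the offset bookkeeping concrete:

```go
package main

import "fmt"

// toyNode and toyArena are invented stand-ins for the vendored builder/Node,
// reduced to the sibling-linking arithmetic.
type toyNode struct {
	data string
	next int // relative offset to the next sibling; 0 means last
}

type toyArena struct {
	nodes   []toyNode
	lastIdx int
}

// pushAndChain appends a node and links the previously pushed node to it,
// mirroring builder.PushAndChain above.
func (a *toyArena) pushAndChain(n toyNode) {
	newIdx := len(a.nodes)
	a.nodes = append(a.nodes, n)
	if newIdx > 0 {
		a.nodes[a.lastIdx].next = newIdx - a.lastIdx
	}
	a.lastIdx = newIdx
}

func main() {
	var a toyArena
	for _, s := range []string{"fruit", "apple", "color"} {
		a.pushAndChain(toyNode{data: s})
	}

	// Walk the chain by adding relative offsets, like Node.Next() does.
	for i := 0; ; i += a.nodes[i].next {
		fmt.Println(a.nodes[i].data)
		if a.nodes[i].next == 0 {
			break
		}
	}
}
```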
func (k Kind) String() string { switch k { case Invalid: diff --git a/vendor/github.com/pelletier/go-toml/v2/parser.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go similarity index 70% rename from vendor/github.com/pelletier/go-toml/v2/parser.go rename to test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go index 9859a795bd..52db88e7a5 100644 --- a/vendor/github.com/pelletier/go-toml/v2/parser.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -1,50 +1,108 @@ -package toml +package unstable import ( "bytes" + "fmt" "unicode" - "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/characters" "github.com/pelletier/go-toml/v2/internal/danger" ) -type parser struct { - builder ast.Builder - ref ast.Reference +// ParserError describes an error relative to the content of the document. +// +// It cannot outlive the instance of Parser it refers to, and may cause panics +// if the parser is reset. +type ParserError struct { + Highlight []byte + Message string + Key []string // optional +} + +// Error is the implementation of the error interface. +func (e *ParserError) Error() string { + return e.Message +} + +// NewParserError is a convenience function to create a ParserError. +// +// Warning: Highlight needs to be a subslice of Parser.data, so only slices +// returned by Parser.Raw are valid candidates. +func NewParserError(highlight []byte, format string, args ...interface{}) error { + return &ParserError{ + Highlight: highlight, + Message: fmt.Errorf(format, args...).Error(), + } +} + +// Parser scans over a TOML-encoded document and generates an iterative AST. +// +// To prime the Parser, first reset it with the contents of a TOML document. +// Then, process all top-level expressions sequentially. See Example. +// +// Don't forget to check Error() after you're done parsing. +// +// Each top-level expression needs to be fully processed before calling +// NextExpression() again. Otherwise, calls to various Node methods may panic if +// the parser has moved on to the next expression. +// +// For performance reasons, go-toml doesn't make a copy of the input bytes to +// the parser. Make sure to copy all the bytes you need to outlive the slice +// given to the parser. +// +// The parser doesn't provide nodes for comments yet, nor for whitespace. type Parser struct { data []byte + builder builder + ref reference left []byte err error first bool } -func (p *parser) Range(b []byte) ast.Range { - return ast.Range{ +// Data returns the slice provided to the last call to Reset. +func (p *Parser) Data() []byte { + return p.data +} + +// Range returns a range description that corresponds to a given slice of the +// input. If the argument is not a subslice of the parser input, this function +// panics. +func (p *Parser) Range(b []byte) Range { + return Range{ Offset: uint32(danger.SubsliceOffset(p.data, b)), Length: uint32(len(b)), } } -func (p *parser) Raw(raw ast.Range) []byte { +// Raw returns the slice corresponding to the bytes in the given range. +func (p *Parser) Raw(raw Range) []byte { return p.data[raw.Offset : raw.Offset+raw.Length] } -func (p *parser) Reset(b []byte) { +// Reset brings the parser to its initial state for a given input. It wipes and reuses internal storage to reduce allocation.
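Because `ParserError.Highlight` and `Node.Data` alias the input, the `Raw`/`Range` pair is how callers recover the verbatim source bytes for a node. A small sketch: `Data` carries the decoded value, while `Raw(...)` returns the exact bytes from the document, quotes and escape sequences included; the document literal is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2/unstable"
)

func main() {
	doc := []byte(`msg = "hi\u0021"`)

	var p unstable.Parser
	p.Reset(doc) // the parser aliases doc; keep it alive and unmodified while parsing

	for p.NextExpression() {
		v := p.Expression().Value()
		fmt.Printf("decoded: %s\n", v.Data)       // hi!
		fmt.Printf("raw:     %s\n", p.Raw(v.Raw)) // "hi\u0021" (verbatim, quotes included)
	}
	if err := p.Error(); err != nil {
		panic(err)
	}
}
```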
+func (p *Parser) Reset(b []byte) { p.builder.Reset() - p.ref = ast.InvalidReference + p.ref = invalidReference p.data = b p.left = b p.err = nil p.first = true } -//nolint:cyclop -func (p *parser) NextExpression() bool { +// NextExpression parses the next top-level expression. If an expression was +// successfully parsed, it returns true. If the parser is at the end of the +// document or an error occurred, it returns false. +// +// Retrieve the parsed expression with Expression(). +func (p *Parser) NextExpression() bool { if len(p.left) == 0 || p.err != nil { return false } p.builder.Reset() - p.ref = ast.InvalidReference + p.ref = invalidReference for { if len(p.left) == 0 || p.err != nil { @@ -73,15 +131,18 @@ } } -func (p *parser) Expression() *ast.Node { +// Expression returns a pointer to the node representing the last successfully +// parsed expression. +func (p *Parser) Expression() *Node { return p.builder.NodeAt(p.ref) } -func (p *parser) Error() error { +// Error returns any error that has occurred during parsing. +func (p *Parser) Error() error { return p.err } -func (p *parser) parseNewline(b []byte) ([]byte, error) { +func (p *Parser) parseNewline(b []byte) ([]byte, error) { if b[0] == '\n' { return b[1:], nil } @@ -91,14 +152,14 @@ func (p *parser) parseNewline(b []byte) ([]byte, error) { return rest, err } - return nil, newDecodeError(b[0:1], "expected newline but got %#U", b[0]) + return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) } -func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { // expression = ws [ comment ] // expression =/ ws keyval ws [ comment ] // expression =/ ws table ws [ comment ] - ref := ast.InvalidReference + ref := invalidReference b = p.parseWhitespace(b) @@ -136,7 +197,7 @@ func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) { return ref, b, nil } -func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseTable(b []byte) (reference, []byte, error) { // table = std-table / array-table if len(b) > 1 && b[1] == '[' { return p.parseArrayTable(b) } return p.parseStdTable(b) } -func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { // array-table = array-table-open key array-table-close // array-table-open = %x5B.5B ws ; [[ Double left square bracket // array-table-close = ws %x5D.5D ; ]] Double right square bracket - ref := p.builder.Push(ast.Node{ - Kind: ast.ArrayTable, + ref := p.builder.Push(Node{ + Kind: ArrayTable, }) b = b[2:] @@ -174,12 +235,12 @@ return ref, b, err } -func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { // std-table = std-table-open key std-table-close // std-table-open = %x5B ws ; [ Left square bracket // std-table-close = ws %x5D ; ] Right square bracket - ref := p.builder.Push(ast.Node{ - Kind: ast.Table, + ref := p.builder.Push(Node{ + Kind: Table, }) b = b[1:] @@ -199,15 +260,15 @@ return ref, b, err } -func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { +func (p *Parser)
parseKeyval(b []byte) (reference, []byte, error) { // keyval = key keyval-sep val - ref := p.builder.Push(ast.Node{ - Kind: ast.KeyValue, + ref := p.builder.Push(Node{ + Kind: KeyValue, }) key, b, err := p.parseKey(b) if err != nil { - return ast.InvalidReference, nil, err + return invalidReference, nil, err } // keyval-sep = ws %x3D ws ; = @@ -215,12 +276,12 @@ func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { b = p.parseWhitespace(b) if len(b) == 0 { - return ast.InvalidReference, nil, newDecodeError(b, "expected = after a key, but the document ends there") + return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") } b, err = expect('=', b) if err != nil { - return ast.InvalidReference, nil, err + return invalidReference, nil, err } b = p.parseWhitespace(b) @@ -237,12 +298,12 @@ func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { } //nolint:cyclop,funlen -func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseVal(b []byte) (reference, []byte, error) { // val = string / boolean / array / inline-table / date-time / float / integer - ref := ast.InvalidReference + ref := invalidReference if len(b) == 0 { - return ref, nil, newDecodeError(b, "expected value, not eof") + return ref, nil, NewParserError(b, "expected value, not eof") } var err error @@ -259,8 +320,8 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { } if err == nil { - ref = p.builder.Push(ast.Node{ - Kind: ast.String, + ref = p.builder.Push(Node{ + Kind: String, Raw: p.Range(raw), Data: v, }) @@ -277,8 +338,8 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { } if err == nil { - ref = p.builder.Push(ast.Node{ - Kind: ast.String, + ref = p.builder.Push(Node{ + Kind: String, Raw: p.Range(raw), Data: v, }) @@ -287,22 +348,22 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { return ref, b, err case 't': if !scanFollowsTrue(b) { - return ref, nil, newDecodeError(atmost(b, 4), "expected 'true'") + return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") } - ref = p.builder.Push(ast.Node{ - Kind: ast.Bool, + ref = p.builder.Push(Node{ + Kind: Bool, Data: b[:4], }) return ref, b[4:], nil case 'f': if !scanFollowsFalse(b) { - return ref, nil, newDecodeError(atmost(b, 5), "expected 'false'") + return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") } - ref = p.builder.Push(ast.Node{ - Kind: ast.Bool, + ref = p.builder.Push(Node{ + Kind: Bool, Data: b[:5], }) @@ -324,7 +385,7 @@ func atmost(b []byte, n int) []byte { return b[:n] } -func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { v, rest, err := scanLiteralString(b) if err != nil { return nil, nil, nil, err @@ -333,19 +394,19 @@ func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { return v, v[1 : len(v)-1], rest, nil } -func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close // inline-table-open = %x7B ws ; { // inline-table-close = ws %x7D ; } // inline-table-sep = ws %x2C ws ; , Comma // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] - parent := p.builder.Push(ast.Node{ - Kind: ast.InlineTable, + parent := p.builder.Push(Node{ + Kind: InlineTable, }) 
first := true - var child ast.Reference + var child reference b = b[1:] @@ -356,7 +417,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { b = p.parseWhitespace(b) if len(b) == 0 { - return parent, nil, newDecodeError(previousB[:1], "inline table is incomplete") + return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") } if b[0] == '}' { @@ -371,7 +432,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { b = p.parseWhitespace(b) } - var kv ast.Reference + var kv reference kv, b, err = p.parseKeyval(b) if err != nil { @@ -394,7 +455,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { } //nolint:funlen,cyclop -func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { // array = array-open [ array-values ] ws-comment-newline array-close // array-open = %x5B ; [ // array-close = %x5D ; ] @@ -405,13 +466,13 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { arrayStart := b b = b[1:] - parent := p.builder.Push(ast.Node{ - Kind: ast.Array, + parent := p.builder.Push(Node{ + Kind: Array, }) first := true - var lastChild ast.Reference + var lastChild reference var err error for len(b) > 0 { @@ -421,7 +482,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { } if len(b) == 0 { - return parent, nil, newDecodeError(arrayStart[:1], "array is incomplete") + return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") } if b[0] == ']' { @@ -430,7 +491,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { if b[0] == ',' { if first { - return parent, nil, newDecodeError(b[0:1], "array cannot start with comma") + return parent, nil, NewParserError(b[0:1], "array cannot start with comma") } b = b[1:] @@ -439,7 +500,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { return parent, nil, err } } else if !first { - return parent, nil, newDecodeError(b[0:1], "array elements must be separated by commas") + return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") } // TOML allows trailing commas in arrays. 
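In other words, `parseValArray` accepts a dangling comma before the closing bracket but rejects a comma with no element before it. A quick sketch through the public `Unmarshal`; the struct and inputs are invented, and the quoted error text comes from the parser above:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2"
)

func main() {
	var out struct {
		A []int `toml:"a"`
	}

	// Trailing comma: valid TOML.
	err := toml.Unmarshal([]byte("a = [1, 2, 3,]"), &out)
	fmt.Println(err, out.A) // <nil> [1 2 3]

	// Leading comma: rejected ("array cannot start with comma").
	err = toml.Unmarshal([]byte("a = [,1]"), &out)
	fmt.Println(err != nil) // true
}
```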
@@ -447,7 +508,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { break } - var valueRef ast.Reference + var valueRef reference valueRef, b, err = p.parseVal(b) if err != nil { return parent, nil, err @@ -472,7 +533,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { return parent, rest, err } -func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) { +func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) { for len(b) > 0 { var err error b = p.parseWhitespace(b) @@ -501,7 +562,7 @@ func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) return b, nil } -func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { token, rest, err := scanMultilineLiteralString(b) if err != nil { return nil, nil, nil, err @@ -520,7 +581,7 @@ func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, } //nolint:funlen,gocognit,cyclop -func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body // ml-basic-string-delim // ml-basic-string-delim = 3quotation-mark @@ -551,11 +612,11 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er if !escaped { str := token[startIdx:endIdx] - verr := utf8TomlValidAlreadyEscaped(str) + verr := characters.Utf8TomlValidAlreadyEscaped(str) if verr.Zero() { return token, str, rest, nil } - return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") } var builder bytes.Buffer @@ -635,13 +696,13 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er builder.WriteRune(x) i += 8 default: - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) } i++ } else { - size := utf8ValidNext(token[i:]) + size := characters.Utf8ValidNext(token[i:]) if size == 0 { - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) } builder.Write(token[i : i+size]) i += size @@ -651,7 +712,7 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er return token, builder.Bytes(), rest, nil } -func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseKey(b []byte) (reference, []byte, error) { // key = simple-key / dotted-key // simple-key = quoted-key / unquoted-key // @@ -662,11 +723,11 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { // dot-sep = ws %x2E ws ; . 
Period raw, key, b, err := p.parseSimpleKey(b) if err != nil { - return ast.InvalidReference, nil, err + return invalidReference, nil, err } - ref := p.builder.Push(ast.Node{ - Kind: ast.Key, + ref := p.builder.Push(Node{ + Kind: Key, Raw: p.Range(raw), Data: key, }) @@ -681,8 +742,8 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { return ref, nil, err } - p.builder.PushAndChain(ast.Node{ - Kind: ast.Key, + p.builder.PushAndChain(Node{ + Kind: Key, Raw: p.Range(raw), Data: key, }) @@ -694,9 +755,9 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { return ref, b, nil } -func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { +func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { if len(b) == 0 { - return nil, nil, nil, newDecodeError(b, "expected key but found none") + return nil, nil, nil, NewParserError(b, "expected key but found none") } // simple-key = quoted-key / unquoted-key @@ -711,12 +772,12 @@ func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { key, rest = scanUnquotedKey(b) return key, key, rest, nil default: - return nil, nil, nil, newDecodeError(b[0:1], "invalid character at start of key: %c", b[0]) + return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) } } //nolint:funlen,cyclop -func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { // basic-string = quotation-mark *basic-char quotation-mark // quotation-mark = %x22 ; " // basic-char = basic-unescaped / escaped @@ -744,11 +805,11 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { // validate the string and return a direct reference to the buffer. 
if !escaped { str := token[startIdx:endIdx] - verr := utf8TomlValidAlreadyEscaped(str) + verr := characters.Utf8TomlValidAlreadyEscaped(str) if verr.Zero() { return token, str, rest, nil } - return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") } i := startIdx @@ -795,13 +856,13 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { builder.WriteRune(x) i += 8 default: - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) } i++ } else { - size := utf8ValidNext(token[i:]) + size := characters.Utf8ValidNext(token[i:]) if size == 0 { - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) } builder.Write(token[i : i+size]) i += size @@ -813,7 +874,7 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { func hexToRune(b []byte, length int) (rune, error) { if len(b) < length { - return -1, newDecodeError(b, "unicode point needs %d character, not %d", length, len(b)) + return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b)) } b = b[:length] @@ -828,19 +889,19 @@ func hexToRune(b []byte, length int) (rune, error) { case 'A' <= c && c <= 'F': d = uint32(c - 'A' + 10) default: - return -1, newDecodeError(b[i:i+1], "non-hex character") + return -1, NewParserError(b[i:i+1], "non-hex character") } r = r*16 + d } if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { - return -1, newDecodeError(b, "escape sequence is invalid Unicode code point") + return -1, NewParserError(b, "escape sequence is invalid Unicode code point") } return rune(r), nil } -func (p *parser) parseWhitespace(b []byte) []byte { +func (p *Parser) parseWhitespace(b []byte) []byte { // ws = *wschar // wschar = %x20 ; Space // wschar =/ %x09 ; Horizontal tab @@ -850,24 +911,24 @@ func (p *parser) parseWhitespace(b []byte) []byte { } //nolint:cyclop -func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { switch b[0] { case 'i': if !scanFollowsInf(b) { - return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'inf'") + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") } - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:3], }), b[3:], nil case 'n': if !scanFollowsNan(b) { - return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'nan'") + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") } - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:3], }), b[3:], nil case '+', '-': @@ -898,7 +959,7 @@ func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, err return p.scanIntOrFloat(b) } -func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { // scans for contiguous characters in [0-9T:Z.+-], and up to one space if // followed by a digit. 
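The classification that follows this scan maps the token onto the four TOML date-time kinds, depending on which of date, time, and timezone are present. A sketch that drives the vendored unstable parser directly and prints the kind chosen for each shape; the inputs are the usual TOML spec examples:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2/unstable"
)

func main() {
	for _, src := range []string{
		"x = 07:32:00",             // time only            -> LocalTime
		"x = 1979-05-27",           // date only            -> LocalDate
		"x = 1979-05-27T07:32:00",  // date and time, no tz -> LocalDateTime
		"x = 1979-05-27T07:32:00Z", // date, time and tz    -> DateTime
	} {
		var p unstable.Parser
		p.Reset([]byte(src))
		if p.NextExpression() {
			fmt.Println(p.Expression().Value().Kind)
		}
		if err := p.Error(); err != nil {
			panic(err)
		}
	}
}
```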
hasDate := false @@ -941,30 +1002,30 @@ byteLoop: } } - var kind ast.Kind + var kind Kind if hasTime { if hasDate { if hasTz { - kind = ast.DateTime + kind = DateTime } else { - kind = ast.LocalDateTime + kind = LocalDateTime } } else { - kind = ast.LocalTime + kind = LocalTime } } else { - kind = ast.LocalDate + kind = LocalDate } - return p.builder.Push(ast.Node{ + return p.builder.Push(Node{ Kind: kind, Data: b[:i], }), b[i:], nil } //nolint:funlen,gocognit,cyclop -func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { i := 0 if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { @@ -990,8 +1051,8 @@ func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { } } - return p.builder.Push(ast.Node{ - Kind: ast.Integer, + return p.builder.Push(Node{ + Kind: Integer, Data: b[:i], }), b[i:], nil } @@ -1013,40 +1074,40 @@ func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { if c == 'i' { if scanFollowsInf(b[i:]) { - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:i+3], }), b[i+3:], nil } - return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'i' while scanning for a number") + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") } if c == 'n' { if scanFollowsNan(b[i:]) { - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:i+3], }), b[i+3:], nil } - return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'n' while scanning for a number") + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") } break } if i == 0 { - return ast.InvalidReference, b, newDecodeError(b, "incomplete number") + return invalidReference, b, NewParserError(b, "incomplete number") } - kind := ast.Integer + kind := Integer if isFloat { - kind = ast.Float + kind = Float } - return p.builder.Push(ast.Node{ + return p.builder.Push(Node{ Kind: kind, Data: b[:i], }), b[i:], nil @@ -1075,11 +1136,11 @@ func isValidBinaryRune(r byte) bool { func expect(x byte, b []byte) ([]byte, error) { if len(b) == 0 { - return nil, newDecodeError(b, "expected character %c but the document ended here", x) + return nil, NewParserError(b, "expected character %c but the document ended here", x) } if b[0] != x { - return nil, newDecodeError(b[0:1], "expected character %c", x) + return nil, NewParserError(b[0:1], "expected character %c", x) } return b[1:], nil diff --git a/vendor/github.com/pelletier/go-toml/v2/scanner.go b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go similarity index 79% rename from vendor/github.com/pelletier/go-toml/v2/scanner.go rename to test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go index bb445fab4c..af22ebbe93 100644 --- a/vendor/github.com/pelletier/go-toml/v2/scanner.go +++ b/test/performance/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go @@ -1,4 +1,6 @@ -package toml +package unstable + +import "github.com/pelletier/go-toml/v2/internal/characters" func scanFollows(b []byte, pattern string) bool { n := len(pattern) @@ -54,16 +56,16 @@ func scanLiteralString(b []byte) ([]byte, []byte, error) { case '\'': return b[:i+1], b[i+1:], nil case '\n', '\r': - return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new 
lines") + return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") } - size := utf8ValidNext(b[i:]) + size := characters.Utf8ValidNext(b[i:]) if size == 0 { - return nil, nil, newDecodeError(b[i:i+1], "invalid character") + return nil, nil, NewParserError(b[i:i+1], "invalid character") } i += size } - return nil, nil, newDecodeError(b[len(b):], "unterminated literal string") + return nil, nil, NewParserError(b[len(b):], "unterminated literal string") } func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { @@ -98,39 +100,39 @@ func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { i++ if i < len(b) && b[i] == '\'' { - return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string") + return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") } return b[:i], b[i:], nil } case '\r': if len(b) < i+2 { - return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`) + return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) } if b[i+1] != '\n' { - return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) } i += 2 // skip the \n continue } - size := utf8ValidNext(b[i:]) + size := characters.Utf8ValidNext(b[i:]) if size == 0 { - return nil, nil, newDecodeError(b[i:i+1], "invalid character") + return nil, nil, NewParserError(b[i:i+1], "invalid character") } i += size } - return nil, nil, newDecodeError(b[len(b):], `multiline literal string not terminated by '''`) + return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) } func scanWindowsNewline(b []byte) ([]byte, []byte, error) { const lenCRLF = 2 if len(b) < lenCRLF { - return nil, nil, newDecodeError(b, "windows new line expected") + return nil, nil, NewParserError(b, "windows new line expected") } if b[1] != '\n' { - return nil, nil, newDecodeError(b, `windows new line should be \r\n`) + return nil, nil, NewParserError(b, `windows new line should be \r\n`) } return b[:lenCRLF], b[lenCRLF:], nil @@ -165,11 +167,11 @@ func scanComment(b []byte) ([]byte, []byte, error) { if i+1 < len(b) && b[i+1] == '\n' { return b[:i+1], b[i+1:], nil } - return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") } - size := utf8ValidNext(b[i:]) + size := characters.Utf8ValidNext(b[i:]) if size == 0 { - return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") } i += size @@ -192,17 +194,17 @@ func scanBasicString(b []byte) ([]byte, bool, []byte, error) { case '"': return b[:i+1], escaped, b[i+1:], nil case '\n', '\r': - return nil, escaped, nil, newDecodeError(b[i:i+1], "basic strings cannot have new lines") + return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") case '\\': if len(b) < i+2 { - return nil, escaped, nil, newDecodeError(b[i:i+1], "need a character after \\") + return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") } escaped = true i++ // skip the next character } } - return nil, escaped, nil, newDecodeError(b[len(b):], `basic string not terminated by "`) + return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) } func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { @@ -243,27 +245,27 @@ func scanMultilineBasicString(b []byte) 
([]byte, bool, []byte, error) { i++ if i < len(b) && b[i] == '"' { - return nil, escaped, nil, newDecodeError(b[i-3:i+1], `""" not allowed in multiline basic string`) + return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) } return b[:i], escaped, b[i:], nil } case '\\': if len(b) < i+2 { - return nil, escaped, nil, newDecodeError(b[len(b):], "need a character after \\") + return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") } escaped = true i++ // skip the next character case '\r': if len(b) < i+2 { - return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`) + return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) } if b[i+1] != '\n' { - return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) } i++ // skip the \n } } - return nil, escaped, nil, newDecodeError(b[len(b):], `multiline basic string not terminated by """`) + return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) } diff --git a/test/performance/vendor/github.com/rs/cors/cors.go b/test/performance/vendor/github.com/rs/cors/cors.go index 2ce24e3f3a..a47b7df871 100644 --- a/test/performance/vendor/github.com/rs/cors/cors.go +++ b/test/performance/vendor/github.com/rs/cors/cors.go @@ -62,6 +62,9 @@ type Options struct { // AllowCredentials indicates whether the request can include user credentials like // cookies, HTTP authentication or client side SSL certificates. AllowCredentials bool + // AllowPrivateNetwork indicates whether to accept cross-origin requests over a + // private network. + AllowPrivateNetwork bool // OptionsPassthrough instructs preflight to let other potential next handlers to // process the OPTIONS method. Turn this on if your application handles OPTIONS. 
OptionsPassthrough bool @@ -103,6 +106,7 @@ type Cors struct { // Status code to use for successful OPTIONS requests optionsSuccessStatus int allowCredentials bool + allowPrivateNetwork bool optionPassthrough bool } @@ -113,6 +117,7 @@ func New(options Options) *Cors { allowOriginFunc: options.AllowOriginFunc, allowOriginRequestFunc: options.AllowOriginRequestFunc, allowCredentials: options.AllowCredentials, + allowPrivateNetwork: options.AllowPrivateNetwork, maxAge: options.MaxAge, optionPassthrough: options.OptionsPassthrough, } @@ -282,6 +287,9 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { headers.Add("Vary", "Origin") headers.Add("Vary", "Access-Control-Request-Method") headers.Add("Vary", "Access-Control-Request-Headers") + if c.allowPrivateNetwork { + headers.Add("Vary", "Access-Control-Request-Private-Network") + } if origin == "" { c.logf(" Preflight aborted: empty origin") @@ -319,6 +327,9 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { if c.allowCredentials { headers.Set("Access-Control-Allow-Credentials", "true") } + if c.allowPrivateNetwork && r.Header.Get("Access-Control-Request-Private-Network") == "true" { + headers.Set("Access-Control-Allow-Private-Network", "true") + } if c.maxAge > 0 { headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) } diff --git a/test/performance/vendor/github.com/spf13/afero/memmap.go b/test/performance/vendor/github.com/spf13/afero/memmap.go index ea0798d870..d06975e712 100644 --- a/test/performance/vendor/github.com/spf13/afero/memmap.go +++ b/test/performance/vendor/github.com/spf13/afero/memmap.go @@ -142,6 +142,11 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { } m.mu.Lock() + // Double check that it doesn't exist. + if _, ok := m.getData()[name]; ok { + m.mu.Unlock() + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } item := mem.CreateDir(name) mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item diff --git a/test/performance/vendor/github.com/spf13/viper/Makefile b/test/performance/vendor/github.com/spf13/viper/Makefile index 130c427e8b..3f4234d334 100644 --- a/test/performance/vendor/github.com/spf13/viper/Makefile +++ b/test/performance/vendor/github.com/spf13/viper/Makefile @@ -16,7 +16,7 @@ endif # Dependency versions GOTESTSUM_VERSION = 1.8.0 -GOLANGCI_VERSION = 1.49.0 +GOLANGCI_VERSION = 1.50.1 # Add the ability to override some variables # Use with care diff --git a/test/performance/vendor/github.com/spf13/viper/README.md b/test/performance/vendor/github.com/spf13/viper/README.md index 63413a7dc0..cd39290521 100644 --- a/test/performance/vendor/github.com/spf13/viper/README.md +++ b/test/performance/vendor/github.com/spf13/viper/README.md @@ -8,7 +8,7 @@ [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) [![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at
https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) @@ -40,8 +40,8 @@ go get github.com/spf13/viper ## What is Viper? -Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed -to work within an application, and can handle all types of configuration needs +Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors). +It is designed to work within an application, and can handle all types of configuration needs and formats. It supports: * setting defaults diff --git a/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go index 45fddc8b59..a993c5994b 100644 --- a/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ b/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -1,39 +1,16 @@ -//go:build viper_toml1 -// +build viper_toml1 - package toml import ( - "github.com/pelletier/go-toml" + "github.com/pelletier/go-toml/v2" ) // Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. type Codec struct{} func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - t, err := toml.TreeFromMap(v) - if err != nil { - return nil, err - } - - s, err := t.ToTomlString() - if err != nil { - return nil, err - } - - return []byte(s), nil + return toml.Marshal(v) } func (Codec) Decode(b []byte, v map[string]interface{}) error { - tree, err := toml.LoadBytes(b) - if err != nil { - return err - } - - tmap := tree.ToMap() - for key, value := range tmap { - v[key] = value - } - - return nil + return toml.Unmarshal(b, &v) } diff --git a/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go deleted file mode 100644 index 112c6d3725..0000000000 --- a/test/performance/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !viper_toml1 -// +build !viper_toml1 - -package toml - -import ( - "github.com/pelletier/go-toml/v2" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - return toml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]interface{}) error { - return toml.Unmarshal(b, &v) -} diff --git a/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index 24cc19dfca..82dc136a3a 100644 --- a/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,6 +1,6 @@ package yaml -// import "gopkg.in/yaml.v2" +import "gopkg.in/yaml.v3" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. 
type Codec struct{} diff --git a/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go deleted file mode 100644 index 4c398c2f42..0000000000 --- a/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build viper_yaml2 -// +build viper_yaml2 - -package yaml - -import yamlv2 "gopkg.in/yaml.v2" - -var yaml = struct { - Marshal func(in interface{}) (out []byte, err error) - Unmarshal func(in []byte, out interface{}) (err error) -}{ - Marshal: yamlv2.Marshal, - Unmarshal: yamlv2.Unmarshal, -} diff --git a/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go deleted file mode 100644 index 3a4775ced9..0000000000 --- a/test/performance/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !viper_yaml2 -// +build !viper_yaml2 - -package yaml - -import yamlv3 "gopkg.in/yaml.v3" - -var yaml = struct { - Marshal func(in interface{}) (out []byte, err error) - Unmarshal func(in []byte, out interface{}) (err error) -}{ - Marshal: yamlv3.Marshal, - Unmarshal: yamlv3.Unmarshal, -} diff --git a/test/performance/vendor/github.com/spf13/viper/viper.go b/test/performance/vendor/github.com/spf13/viper/viper.go index 5c12529b4c..06610fc5a7 100644 --- a/test/performance/vendor/github.com/spf13/viper/viper.go +++ b/test/performance/vendor/github.com/spf13/viper/viper.go @@ -421,13 +421,18 @@ var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props // SupportedRemoteProviders are universally supported remote providers. var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"} +// OnConfigChange sets the event handler that is called when a config file changes. func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } + +// OnConfigChange sets the event handler that is called when a config file changes. func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { v.onConfigChange = run } +// WatchConfig starts watching a config file for changes. func WatchConfig() { v.WatchConfig() } +// WatchConfig starts watching a config file for changes. func (v *Viper) WatchConfig() { initWG := sync.WaitGroup{} initWG.Add(1) diff --git a/test/performance/vendor/github.com/subosito/gotenv/gotenv.go b/test/performance/vendor/github.com/subosito/gotenv/gotenv.go index 7b1186e1fd..dc013e1e07 100644 --- a/test/performance/vendor/github.com/subosito/gotenv/gotenv.go +++ b/test/performance/vendor/github.com/subosito/gotenv/gotenv.go @@ -3,6 +3,7 @@ package gotenv import ( "bufio" + "bytes" "fmt" "io" "os" @@ -174,9 +175,36 @@ func Write(env Env, filename string) error { return file.Sync() } +// splitLines is a valid SplitFunc for a bufio.Scanner. It will split lines on CR ('\r'), LF ('\n') or CRLF (any of the three sequences). +// If a CR is immediately followed by a LF, it is treated as a CRLF (one single line break). 
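A usage sketch for the new splitter, assuming the `splitLines` implementation defined just below is in scope (with `bufio`, `fmt`, and `strings` imported); it shows all three line endings producing the same tokens:

```go
// Assumes the splitLines SplitFunc below is in the same package.
sc := bufio.NewScanner(strings.NewReader("A=1\rB=2\nC=3\r\nD=4"))
sc.Split(splitLines)
for sc.Scan() {
	fmt.Printf("%q\n", sc.Text()) // "A=1", then "B=2", "C=3", "D=4"
}
```

The CRLF check is what keeps `"C=3\r\n"` from producing an empty extra token: the CR ends the line, and the LF that immediately follows is consumed as part of the same break.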
+func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, bufio.ErrFinalToken + } + + idx := bytes.IndexAny(data, "\r\n") + switch { + case atEOF && idx < 0: + return len(data), data, bufio.ErrFinalToken + + case idx < 0: + return 0, nil, nil + } + + // consume CR or LF + eol := idx + 1 + // detect CRLF + if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' { + eol++ + } + + return eol, data[:idx], nil +} + func strictParse(r io.Reader, override bool) (Env, error) { env := make(Env) scanner := bufio.NewScanner(r) + scanner.Split(splitLines) firstLine := true @@ -283,7 +311,6 @@ func parseLine(s string, env Env, override bool) error { return varReplacement(s, hsq, env, override) } val = varRgx.ReplaceAllStringFunc(val, fv) - val = parseVal(val, env, hdq, override) } env[key] = val @@ -352,18 +379,3 @@ func checkFormat(s string, env Env) error { return fmt.Errorf("line `%s` doesn't match format", s) } - -func parseVal(val string, env Env, ignoreNewlines bool, override bool) string { - if strings.Contains(val, "=") && !ignoreNewlines { - kv := strings.Split(val, "\r") - - if len(kv) > 1 { - val = kv[0] - for _, l := range kv[1:] { - _ = parseLine(l, env, override) - } - } - } - - return val -} diff --git a/test/performance/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/test/performance/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e9..a6b5081888 100644 --- a/test/performance/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/test/performance/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
diff --git a/test/performance/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/test/performance/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go similarity index 87% rename from test/performance/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go rename to test/performance/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go index 4ecfa1c215..cece046be3 100644 --- a/test/performance/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go +++ b/test/performance/vendor/google.golang.org/grpc/balancer/grpclb/grpclbstate/state.go @@ -16,9 +16,9 @@ * */ -// Package state declares grpclb types to be set by resolvers wishing to pass -// information to grpclb via resolver.State Attributes. -package state +// Package grpclbstate declares grpclb types to be set by resolvers wishing to +// pass information to grpclb via resolver.State Attributes. +package grpclbstate import ( "google.golang.org/grpc/resolver" @@ -27,7 +27,7 @@ import ( // keyType is the key to use for storing State in Attributes. type keyType string -const key = keyType("grpc.grpclb.state") +const key = keyType("grpc.grpclb.grpclbstate") // State contains gRPCLB-relevant data passed from the name resolver. type State struct { diff --git a/test/performance/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/test/performance/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f281..66d141fce7 100644 --- a/test/performance/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/test/performance/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. 
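Besides renaming the package, the hunk above changes the Attributes key from "grpc.grpclb.state" to "grpc.grpclb.grpclbstate" while keeping the unexported-key-type idiom: because keyType is unexported, no other package can manufacture an equal key, even from the identical string. A minimal stdlib-only sketch of the same idiom, using context.Context in place of resolver.State Attributes (the stored value is illustrative):

package main

import (
	"context"
	"fmt"
)

// keyType mirrors the unexported key type in grpclbstate: values stored
// under it cannot collide with keys defined by any other package.
type keyType string

const key = keyType("grpc.grpclb.grpclbstate")

func main() {
	ctx := context.WithValue(context.Background(), key, "resolver-provided state")

	// A plain string with the same contents is a different key, because
	// its dynamic type is string, not keyType.
	fmt.Println(ctx.Value("grpc.grpclb.grpclbstate")) // <nil>
	fmt.Println(ctx.Value(key))                       // resolver-provided state
}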
diff --git a/test/performance/vendor/google.golang.org/grpc/clientconn.go b/test/performance/vendor/google.golang.org/grpc/clientconn.go index 422639c79d..0456689045 100644 --- a/test/performance/vendor/google.golang.org/grpc/clientconn.go +++ b/test/performance/vendor/google.golang.org/grpc/clientconn.go @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -1268,6 +1274,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) diff --git a/test/performance/vendor/google.golang.org/grpc/dialoptions.go b/test/performance/vendor/google.golang.org/grpc/dialoptions.go index 9372dc322e..8f5b536f11 100644 --- a/test/performance/vendor/google.golang.org/grpc/dialoptions.go +++ b/test/performance/vendor/google.golang.org/grpc/dialoptions.go @@ -116,8 +116,9 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +128,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/test/performance/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/test/performance/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26d1..85e3ff2816 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } diff --git a/test/performance/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/test/performance/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c5149..d51302e65c 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,7 +32,7 @@ import ( "sync" "time" - grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + grpclbstate "google.golang.org/grpc/balancer/grpclb/grpclbstate" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/test/performance/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/test/performance/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1..c6e08221ff 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, diff --git a/test/performance/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/test/performance/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702cac..1609116877 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/test/performance/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/test/performance/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f48f..aaa9c859a3 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,6 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. 
%v", err) - } - err = nil - } - }() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +570,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -662,11 +657,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err + if err == errStreamDrain { // errStreamDrain need not close transport + return nil } - // Other errors(errStreamDrain) need not close transport. - return nil + return err } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err @@ -764,7 +758,7 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { } } if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +793,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +811,14 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + l.framer.writer.Flush() + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +847,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/test/performance/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/test/performance/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fb272235d8..ebe8bfe330 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? 
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed time-out: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req diff --git a/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d518b07e16..3e582a2853 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. 
+ userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -238,8 +242,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } conn.Close() } }(conn) @@ -314,6 +321,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -440,10 +448,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. @@ -702,6 +708,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -934,6 +952,9 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. 
t.onClose() @@ -986,11 +1007,14 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) diff --git a/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647bc..37e089bc84 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -453,7 +452,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } if !isGRPC || headerError { @@ -463,7 +462,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -503,7 +502,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +513,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +529,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +549,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +596,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +629,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +839,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +882,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. 
- if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1146,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1162,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1189,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1288,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1312,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1347,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/test/performance/vendor/google.golang.org/grpc/internal/transport/transport.go b/test/performance/vendor/google.golang.org/grpc/internal/transport/transport.go index 2e615ee20c..6cff20c8e0 100644 --- a/test/performance/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/test/performance/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -701,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. 
- Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/test/performance/vendor/google.golang.org/grpc/pickfirst.go b/test/performance/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0a2..b3a55481b9 100644 --- a/test/performance/vendor/google.golang.org/grpc/pickfirst.go +++ b/test/performance/vendor/google.golang.org/grpc/pickfirst.go @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/test/performance/vendor/google.golang.org/grpc/regenerate.sh b/test/performance/vendor/google.golang.org/grpc/regenerate.sh index 99db79fafc..a6f26c8ab0 100644 --- a/test/performance/vendor/google.golang.org/grpc/regenerate.sh +++ b/test/performance/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/test/performance/vendor/google.golang.org/grpc/server.go b/test/performance/vendor/google.golang.org/grpc/server.go index f4dde72b41..2808b7c83e 100644 --- a/test/performance/vendor/google.golang.org/grpc/server.go +++ b/test/performance/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. 
-// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. 
- var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/test/performance/vendor/google.golang.org/grpc/stream.go b/test/performance/vendor/google.golang.org/grpc/stream.go index 960c3e33df..0f8e6c0149 100644 --- a/test/performance/vendor/google.golang.org/grpc/stream.go +++ b/test/performance/vendor/google.golang.org/grpc/stream.go @@ -416,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( diff --git a/test/performance/vendor/google.golang.org/grpc/version.go b/test/performance/vendor/google.golang.org/grpc/version.go index 2198e7098d..243e06e8d3 100644 --- a/test/performance/vendor/google.golang.org/grpc/version.go +++ b/test/performance/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.51.0" +const Version = "1.52.0" diff --git a/test/performance/vendor/google.golang.org/grpc/vet.sh b/test/performance/vendor/google.golang.org/grpc/vet.sh index bd8e0cdb33..1d03c09148 100644 --- a/test/performance/vendor/google.golang.org/grpc/vet.sh +++ b/test/performance/vendor/google.golang.org/grpc/vet.sh @@ -121,8 +121,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. 
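The server.go hunks above replace the closure-plus-state-struct interceptor chaining with recursive getChainUnaryHandler/getChainStreamHandler helpers: interceptor i receives, as its handler argument, a closure that invokes interceptor i+1, bottoming out at the real method handler. A self-contained sketch of the same scheme, with simplified handler/interceptor types standing in for grpc.UnaryHandler and grpc.UnaryServerInterceptor:

package main

import (
	"context"
	"fmt"
)

type handler func(ctx context.Context, req string) (string, error)
type interceptor func(ctx context.Context, req string, next handler) (string, error)

// getChainHandler mirrors getChainUnaryHandler: each interceptor's "next"
// closes over the rest of the chain, ending at the final handler.
func getChainHandler(ics []interceptor, curr int, final handler) handler {
	if curr == len(ics)-1 {
		return final
	}
	return func(ctx context.Context, req string) (string, error) {
		return ics[curr+1](ctx, req, getChainHandler(ics, curr+1, final))
	}
}

func chain(ics []interceptor, final handler) handler {
	if len(ics) == 0 {
		return final
	}
	return func(ctx context.Context, req string) (string, error) {
		return ics[0](ctx, req, getChainHandler(ics, 0, final))
	}
}

func main() {
	logging := func(ctx context.Context, req string, next handler) (string, error) {
		fmt.Println("before:", req)
		resp, err := next(ctx, req)
		fmt.Println("after:", resp)
		return resp, err
	}
	h := chain([]interceptor{logging, logging}, func(_ context.Context, req string) (string, error) {
		return "resp(" + req + ")", nil
	})
	out, _ := h(context.Background(), "ping")
	fmt.Println(out) // resp(ping)
}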
diff --git a/test/performance/vendor/modules.txt b/test/performance/vendor/modules.txt index a3df2b5c0c..0c9810419f 100644 --- a/test/performance/vendor/modules.txt +++ b/test/performance/vendor/modules.txt @@ -69,7 +69,7 @@ github.com/inconshreveable/mousetrap # github.com/kelseyhightower/envconfig v1.4.0 ## explicit github.com/kelseyhightower/envconfig -# github.com/klauspost/compress v1.15.13 +# github.com/klauspost/compress v1.15.15 ## explicit; go 1.17 github.com/klauspost/compress/s2 # github.com/klauspost/cpuid/v2 v2.1.0 @@ -178,15 +178,13 @@ github.com/nxadm/tail/winfile # github.com/orcaman/concurrent-map v1.0.0 ## explicit github.com/orcaman/concurrent-map -# github.com/pelletier/go-toml v1.9.5 -## explicit; go 1.12 -github.com/pelletier/go-toml -# github.com/pelletier/go-toml/v2 v2.0.5 +# github.com/pelletier/go-toml/v2 v2.0.6 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 -github.com/pelletier/go-toml/v2/internal/ast +github.com/pelletier/go-toml/v2/internal/characters github.com/pelletier/go-toml/v2/internal/danger github.com/pelletier/go-toml/v2/internal/tracker +github.com/pelletier/go-toml/v2/unstable # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib @@ -212,7 +210,7 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/rs/cors v1.8.2 +# github.com/rs/cors v1.8.3 ## explicit; go 1.13 github.com/rs/cors # github.com/sanity-io/litter v1.5.5 @@ -238,7 +236,7 @@ github.com/shirou/gopsutil/v3/process # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spf13/afero v1.9.2 +# github.com/spf13/afero v1.9.3 ## explicit; go 1.16 github.com/spf13/afero github.com/spf13/afero/internal/common @@ -255,7 +253,7 @@ github.com/spf13/jwalterweatherman # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag -# github.com/spf13/viper v1.14.0 +# github.com/spf13/viper v1.15.0 ## explicit; go 1.17 github.com/spf13/viper github.com/spf13/viper/internal/encoding @@ -274,7 +272,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/subosito/gotenv v1.4.1 +# github.com/subosito/gotenv v1.4.2 ## explicit; go 1.18 github.com/subosito/gotenv # github.com/tklauser/go-sysconf v0.3.10 @@ -337,17 +335,17 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.1.0 ## explicit golang.org/x/time/rate -# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 +# google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.51.0 +# google.golang.org/grpc v1.52.0 ## explicit; go 1.17 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff google.golang.org/grpc/balancer google.golang.org/grpc/balancer/base -google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/grpclb/grpclbstate google.golang.org/grpc/balancer/roundrobin google.golang.org/grpc/binarylog/grpc_binarylog_v1 google.golang.org/grpc/channelz diff --git a/test/testdata/configs/nginx-agent.conf b/test/testdata/configs/nginx-agent.conf index c025c5edb7..3f68fab64f 100644 --- a/test/testdata/configs/nginx-agent.conf +++ b/test/testdata/configs/nginx-agent.conf @@ -1,18 +1,14 @@ server: host: 127.0.0.1 grpcPort: 443 - token: goodfellow metrics: agent-ingest command: dataplane-manager - 
target: 127.0.0.1:443 config-dirs: /etc/nginx:/usr/local/etc/nginx log: level: info path: /var/log/nginx-agent/agent.log tls: enable: true -nginx: - bin_path: /usr/sbin/nginx dataplane: status: poll_interval: 30s diff --git a/vendor/github.com/AlekSi/pointer/LICENSE b/vendor/github.com/AlekSi/pointer/LICENSE new file mode 100644 index 0000000000..55f8794c86 --- /dev/null +++ b/vendor/github.com/AlekSi/pointer/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Alexey Palazhchenko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/AlekSi/pointer/README.md b/vendor/github.com/AlekSi/pointer/README.md new file mode 100644 index 0000000000..64c72d735b --- /dev/null +++ b/vendor/github.com/AlekSi/pointer/README.md @@ -0,0 +1,44 @@ +# pointer + +[![Go Reference](https://pkg.go.dev/badge/github.com/AlekSi/pointer.svg)](https://pkg.go.dev/github.com/AlekSi/pointer) + +Go package `pointer` provides helpers to convert between pointers and values of built-in +(and, with Go 1.18+ generics, of any) types. + +``` +go get github.com/AlekSi/pointer +``` + +API is stable. [Documentation](https://pkg.go.dev/github.com/AlekSi/pointer). + +```go +package motivationalexample + +import ( + "encoding/json" + + "github.com/AlekSi/pointer" +) + +const ( + defaultName = "some name" +) + +// Stuff contains optional fields. +type Stuff struct { + Name *string + Comment *string + Value *int64 + Time *time.Time +} + +// SomeStuff makes some JSON-encoded stuff. +func SomeStuff() (data []byte, err error) { + return json.Marshal(&Stuff{ + Name: pointer.ToString(defaultName), // can't say &defaultName + Comment: pointer.ToString("not yet"), // can't say &"not yet" + Value: pointer.ToInt64(42), // can't say &42 or &int64(42) + Time: pointer.ToTime(time.Date(2014, 6, 25, 12, 24, 40, 0, time.UTC)), // can't say &time.Date(…) + }) +} +``` diff --git a/vendor/github.com/AlekSi/pointer/generic.go b/vendor/github.com/AlekSi/pointer/generic.go new file mode 100644 index 0000000000..1ef976b1fc --- /dev/null +++ b/vendor/github.com/AlekSi/pointer/generic.go @@ -0,0 +1,36 @@ +//go:build go1.18 +// +build go1.18 + +package pointer + +// To returns a pointer to the passed value. +func To[T any](t T) *T { + return &t +} + +// ToOrNil returns a pointer to the passed value, or nil, if the passed value is a zero value. +// If the passed value has `IsZero() bool` method (for example, time.Time instance), +// it is used to determine if the value is zero. 
+func ToOrNil[T comparable](t T) *T { + if z, ok := any(t).(interface{ IsZero() bool }); ok { + if z.IsZero() { + return nil + } + return &t + } + + var zero T + if t == zero { + return nil + } + return &t +} + +// Get returns the value from the passed pointer or the zero value if the pointer is nil. +func Get[T any](t *T) T { + if t == nil { + var zero T + return zero + } + return *t +} diff --git a/vendor/github.com/AlekSi/pointer/pointer.go b/vendor/github.com/AlekSi/pointer/pointer.go new file mode 100644 index 0000000000..8203febdf7 --- /dev/null +++ b/vendor/github.com/AlekSi/pointer/pointer.go @@ -0,0 +1,124 @@ +// Package pointer provides helpers to convert between pointers and values of built-in (and, with generics, of any) types. +package pointer // import "github.com/AlekSi/pointer" + +import ( + "time" +) + +/* +Order as in spec: + bool byte complex64 complex128 error float32 float64 + int int8 int16 int32 int64 rune string + uint uint8 uint16 uint32 uint64 uintptr + time.Duration time.Time +*/ + +// ToBool returns a pointer to the passed bool value. +func ToBool(b bool) *bool { + return &b +} + +// ToByte returns a pointer to the passed byte value. +func ToByte(b byte) *byte { + return &b +} + +// ToComplex64 returns a pointer to the passed complex64 value. +func ToComplex64(c complex64) *complex64 { + return &c +} + +// ToComplex128 returns a pointer to the passed complex128 value. +func ToComplex128(c complex128) *complex128 { + return &c +} + +// ToError returns a pointer to the passed error value. +func ToError(e error) *error { + return &e +} + +// ToFloat32 returns a pointer to the passed float32 value. +func ToFloat32(f float32) *float32 { + return &f +} + +// ToFloat64 returns a pointer to the passed float64 value. +func ToFloat64(f float64) *float64 { + return &f +} + +// ToInt returns a pointer to the passed int value. +func ToInt(i int) *int { + return &i +} + +// ToInt8 returns a pointer to the passed int8 value. +func ToInt8(i int8) *int8 { + return &i +} + +// ToInt16 returns a pointer to the passed int16 value. +func ToInt16(i int16) *int16 { + return &i +} + +// ToInt32 returns a pointer to the passed int32 value. +func ToInt32(i int32) *int32 { + return &i +} + +// ToInt64 returns a pointer to the passed int64 value. +func ToInt64(i int64) *int64 { + return &i +} + +// ToRune returns a pointer to the passed rune value. +func ToRune(r rune) *rune { + return &r +} + +// ToString returns a pointer to the passed string value. +func ToString(s string) *string { + return &s +} + +// ToUint returns a pointer to the passed uint value. +func ToUint(u uint) *uint { + return &u +} + +// ToUint8 returns a pointer to the passed uint8 value. +func ToUint8(u uint8) *uint8 { + return &u +} + +// ToUint16 returns a pointer to the passed uint16 value. +func ToUint16(u uint16) *uint16 { + return &u +} + +// ToUint32 returns a pointer to the passed uint32 value. +func ToUint32(u uint32) *uint32 { + return &u +} + +// ToUint64 returns a pointer to the passed uint64 value. +func ToUint64(u uint64) *uint64 { + return &u +} + +// ToUintptr returns a pointer to the passed uintptr value. +func ToUintptr(u uintptr) *uintptr { + return &u +} + +// ToDuration returns a pointer to the passed time.Duration value. +func ToDuration(d time.Duration) *time.Duration { + return &d +} + +// ToTime returns a pointer to the passed time.Time value. 
+func ToTime(t time.Time) *time.Time { + return &t +} diff --git a/vendor/github.com/AlekSi/pointer/pointer_or_nil.go b/vendor/github.com/AlekSi/pointer/pointer_or_nil.go new file mode 100644 index 0000000000..f464d2c610 --- /dev/null +++ b/vendor/github.com/AlekSi/pointer/pointer_or_nil.go @@ -0,0 +1,189 @@ +package pointer + +import ( + "time" +) + +/* +Order as in spec: + bool byte complex64 complex128 error float32 float64 + int int8 int16 int32 int64 rune string + uint uint8 uint16 uint32 uint64 uintptr + time.Duration time.Time +*/ + +// ToBoolOrNil returns a pointer to the passed bool value, or nil, if passed value is a zero value. +func ToBoolOrNil(b bool) *bool { + if b == false { + return nil + } + return &b +} + +// ToByteOrNil returns a pointer to the passed byte value, or nil, if passed value is a zero value. +func ToByteOrNil(b byte) *byte { + if b == 0 { + return nil + } + return &b +} + +// ToComplex64OrNil returns a pointer to the passed complex64 value, or nil, if passed value is a zero value. +func ToComplex64OrNil(c complex64) *complex64 { + if c == 0 { + return nil + } + return &c +} + +// ToComplex128OrNil returns a pointer to the passed complex128 value, or nil, if passed value is a zero value. +func ToComplex128OrNil(c complex128) *complex128 { + if c == 0 { + return nil + } + return &c +} + +// ToErrorOrNil returns a pointer to the passed error value, or nil, if passed value is a zero value. +func ToErrorOrNil(e error) *error { + if e == nil { + return nil + } + return &e +} + +// ToFloat32OrNil returns a pointer to the passed float32 value, or nil, if passed value is a zero value. +func ToFloat32OrNil(f float32) *float32 { + if f == 0 { + return nil + } + return &f +} + +// ToFloat64OrNil returns a pointer to the passed float64 value, or nil, if passed value is a zero value. +func ToFloat64OrNil(f float64) *float64 { + if f == 0 { + return nil + } + return &f +} + +// ToIntOrNil returns a pointer to the passed int value, or nil, if passed value is a zero value. +func ToIntOrNil(i int) *int { + if i == 0 { + return nil + } + return &i +} + +// ToInt8OrNil returns a pointer to the passed int8 value, or nil, if passed value is a zero value. +func ToInt8OrNil(i int8) *int8 { + if i == 0 { + return nil + } + return &i +} + +// ToInt16OrNil returns a pointer to the passed int16 value, or nil, if passed value is a zero value. +func ToInt16OrNil(i int16) *int16 { + if i == 0 { + return nil + } + return &i +} + +// ToInt32OrNil returns a pointer to the passed int32 value, or nil, if passed value is a zero value. +func ToInt32OrNil(i int32) *int32 { + if i == 0 { + return nil + } + return &i +} + +// ToInt64OrNil returns a pointer to the passed int64 value, or nil, if passed value is a zero value. +func ToInt64OrNil(i int64) *int64 { + if i == 0 { + return nil + } + return &i +} + +// ToRuneOrNil returns a pointer to the passed rune value, or nil, if passed value is a zero value. +func ToRuneOrNil(r rune) *rune { + if r == 0 { + return nil + } + return &r +} + +// ToStringOrNil returns a pointer to the passed string value, or nil, if passed value is a zero value. +func ToStringOrNil(s string) *string { + if s == "" { + return nil + } + return &s +} + +// ToUintOrNil returns a pointer to the passed uint value, or nil, if passed value is a zero value. +func ToUintOrNil(u uint) *uint { + if u == 0 { + return nil + } + return &u +} + +// ToUint8OrNil returns a pointer to the passed uint8 value, or nil, if passed value is a zero value. 
+func ToUint8OrNil(u uint8) *uint8 { + if u == 0 { + return nil + } + return &u +} + +// ToUint16OrNil returns a pointer to the passed uint16 value, or nil, if passed value is a zero value. +func ToUint16OrNil(u uint16) *uint16 { + if u == 0 { + return nil + } + return &u +} + +// ToUint32OrNil returns a pointer to the passed uint32 value, or nil, if passed value is a zero value. +func ToUint32OrNil(u uint32) *uint32 { + if u == 0 { + return nil + } + return &u +} + +// ToUint64OrNil returns a pointer to the passed uint64 value, or nil, if passed value is a zero value. +func ToUint64OrNil(u uint64) *uint64 { + if u == 0 { + return nil + } + return &u +} + +// ToUintptrOrNil returns a pointer to the passed uintptr value, or nil, if passed value is a zero value. +func ToUintptrOrNil(u uintptr) *uintptr { + if u == 0 { + return nil + } + return &u +} + +// ToDurationOrNil returns a pointer to the passed time.Duration value, or nil, if passed value is a zero value. +func ToDurationOrNil(d time.Duration) *time.Duration { + if d == 0 { + return nil + } + return &d +} + +// ToTimeOrNil returns a pointer to the passed time.Time value, or nil, if passed value is a zero value (t.IsZero() returns true). +func ToTimeOrNil(t time.Time) *time.Time { + if t.IsZero() { + return nil + } + return &t +} diff --git a/vendor/github.com/AlekSi/pointer/value.go b/vendor/github.com/AlekSi/pointer/value.go new file mode 100644 index 0000000000..23928ac03d --- /dev/null +++ b/vendor/github.com/AlekSi/pointer/value.go @@ -0,0 +1,189 @@ +package pointer + +import ( + "time" +) + +/* +Order as in spec: + bool byte complex64 complex128 error float32 float64 + int int8 int16 int32 int64 rune string + uint uint8 uint16 uint32 uint64 uintptr + time.Duration time.Time +*/ + +// GetBool returns the value of the bool pointer passed in or false if the pointer is nil. +func GetBool(b *bool) bool { + if b == nil { + return false + } + return *b +} + +// GetByte returns the value of the byte pointer passed in or 0 if the pointer is nil. +func GetByte(b *byte) byte { + if b == nil { + return 0 + } + return *b +} + +// GetComplex64 returns the value of the complex64 pointer passed in or 0 if the pointer is nil. +func GetComplex64(c *complex64) complex64 { + if c == nil { + return 0 + } + return *c +} + +// GetComplex128 returns the value of the complex128 pointer passed in or 0 if the pointer is nil. +func GetComplex128(c *complex128) complex128 { + if c == nil { + return 0 + } + return *c +} + +// GetError returns the value of the error pointer passed in or nil if the pointer is nil. +func GetError(e *error) error { + if e == nil { + return nil + } + return *e +} + +// GetFloat32 returns the value of the float32 pointer passed in or 0 if the pointer is nil. +func GetFloat32(f *float32) float32 { + if f == nil { + return 0 + } + return *f +} + +// GetFloat64 returns the value of the float64 pointer passed in or 0 if the pointer is nil. +func GetFloat64(f *float64) float64 { + if f == nil { + return 0 + } + return *f +} + +// GetInt returns the value of the int pointer passed in or 0 if the pointer is nil. +func GetInt(i *int) int { + if i == nil { + return 0 + } + return *i +} + +// GetInt8 returns the value of the int8 pointer passed in or 0 if the pointer is nil. +func GetInt8(i *int8) int8 { + if i == nil { + return 0 + } + return *i +} + +// GetInt16 returns the value of the int16 pointer passed in or 0 if the pointer is nil. 
+func GetInt16(i *int16) int16 { + if i == nil { + return 0 + } + return *i +} + +// GetInt32 returns the value of the int32 pointer passed in or 0 if the pointer is nil. +func GetInt32(i *int32) int32 { + if i == nil { + return 0 + } + return *i +} + +// GetInt64 returns the value of the int64 pointer passed in or 0 if the pointer is nil. +func GetInt64(i *int64) int64 { + if i == nil { + return 0 + } + return *i +} + +// GetRune returns the value of the rune pointer passed in or 0 if the pointer is nil. +func GetRune(r *rune) rune { + if r == nil { + return 0 + } + return *r +} + +// GetString returns the value of the string pointer passed in or empty string if the pointer is nil. +func GetString(s *string) string { + if s == nil { + return "" + } + return *s +} + +// GetUint returns the value of the uint pointer passed in or 0 if the pointer is nil. +func GetUint(u *uint) uint { + if u == nil { + return 0 + } + return *u +} + +// GetUint8 returns the value of the uint8 pointer passed in or 0 if the pointer is nil. +func GetUint8(u *uint8) uint8 { + if u == nil { + return 0 + } + return *u +} + +// GetUint16 returns the value of the uint16 pointer passed in or 0 if the pointer is nil. +func GetUint16(u *uint16) uint16 { + if u == nil { + return 0 + } + return *u +} + +// GetUint32 returns the value of the uint32 pointer passed in or 0 if the pointer is nil. +func GetUint32(u *uint32) uint32 { + if u == nil { + return 0 + } + return *u +} + +// GetUint64 returns the value of the uint64 pointer passed in or 0 if the pointer is nil. +func GetUint64(u *uint64) uint64 { + if u == nil { + return 0 + } + return *u +} + +// GetUintptr returns the value of the uintptr pointer passed in or 0 if the pointer is nil. +func GetUintptr(u *uintptr) uintptr { + if u == nil { + return 0 + } + return *u +} + +// GetDuration returns the value of the duration pointer passed in or 0 if the pointer is nil. +func GetDuration(d *time.Duration) time.Duration { + if d == nil { + return 0 + } + return *d +} + +// GetTime returns the value of the time pointer passed in or zero time.Time if the pointer is nil. +func GetTime(t *time.Time) time.Time { + if t == nil { + return time.Time{} + } + return *t +} diff --git a/vendor/github.com/Azure/go-ansiterm/SECURITY.md b/vendor/github.com/Azure/go-ansiterm/SECURITY.md new file mode 100644 index 0000000000..e138ec5d6a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 
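The pointer helpers vendored above pair To*OrNil constructors with Get* accessors. A minimal, hedged sketch of the intended round trip (the struct and field names below are hypothetical; the pointer functions themselves are the ones vendored here):

```
package main

import (
	"fmt"
	"time"

	"github.com/AlekSi/pointer"
)

// request is a hypothetical struct whose optional fields are pointers,
// so that nil can mean "unset".
type request struct {
	Name    *string
	Timeout *time.Duration
}

func main() {
	r := request{
		Name:    pointer.ToStringOrNil(""),                // zero value: stays nil
		Timeout: pointer.ToDurationOrNil(5 * time.Second), // non-zero: gets a pointer
	}
	// The Get* accessors dereference safely, returning the zero value for nil.
	fmt.Printf("name=%q timeout=%s\n", pointer.GetString(r.Name), pointer.GetDuration(r.Timeout))
	// prints: name="" timeout=5s
}
```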
+ +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml index 3d9ebadb93..096369d44d 100644 --- a/vendor/github.com/Masterminds/semver/.travis.yml +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -6,6 +6,8 @@ go: - 1.8.x - 1.9.x - 1.10.x + - 1.11.x + - 1.12.x - tip # Setting sudo access to false will let Travis CI use containers rather than diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md index b888e20aba..e405c9a84d 100644 --- a/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -1,3 +1,26 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + # 1.4.2 (2018-04-10) ## Changed diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt index 0da4aeadb0..9ff7da9c48 100644 --- a/vendor/github.com/Masterminds/semver/LICENSE.txt +++ 
b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -1,5 +1,4 @@ -The Masterminds -Copyright (C) 2014-2015, Matt Butcher and Matt Farina +Copyright (C) 2014-2019, Matt Butcher and Matt Farina Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md index 3e934ed71e..1b52d2f436 100644 --- a/vendor/github.com/Masterminds/semver/README.md +++ b/vendor/github.com/Masterminds/semver/README.md @@ -11,6 +11,9 @@ The `semver` package provides the ability to work with [Semantic Versions](http: Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) [![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + ## Parsing Semantic Versions To parse a semantic version use the `NewVersion` function. For example, @@ -80,14 +83,32 @@ The basic comparisons are: * `>=`: greater than or equal to * `<=`: less than or equal to -_Note, according to the Semantic Version specification pre-releases may not be -API compliant with their release counterpart. It says,_ +## Working With Pre-release Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, pre-releases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification pre-releases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. -> _A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version._ +SemVer comparisons without a pre-release comparator will skip pre-release versions. +For example, `>=1.2.3` will skip pre-releases when looking at a list of releases +while `>=1.2.3-0` will evaluate and find pre-releases. -_SemVer comparisons without a pre-release value will skip pre-release versions. -For example, `>1.2.3` will skip pre-releases when looking at a list of values -while `>1.2.3-alpha.1` will evaluate pre-releases._ +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec.
Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/)) + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. ## Hyphen Range Comparisons @@ -105,7 +126,7 @@ back to the pack level comparison (see tilde below). For example, * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` * `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `<= 3` +* `<= 2.x` is equivalent to `< 3` * `*` is equivalent to `>= 0.0.0` ## Tilde Range Comparisons (Patch) @@ -126,6 +147,7 @@ The caret (`^`) comparison operator is for major level changes. This is useful when comparisons of API versions as a major change is API breaking. For example, * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0` * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` * `^2.3` is equivalent to `>= 2.3, < 3` * `^2.x` is equivalent to `>= 2.0.0, < 3` @@ -159,6 +181,13 @@ version didn't meet the constraint is returned. For example, } ``` +# Fuzzing + + [dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing. + +1. `go-fuzz-build` +2. `go-fuzz -workdir=fuzz` + # Contribute If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go index a41a6a7a4a..b94b93413f 100644 --- a/vendor/github.com/Masterminds/semver/constraints.go +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -65,13 +65,30 @@ func (cs Constraints) Check(v *Version) bool { func (cs Constraints) Validate(v *Version) (bool, []error) { // loop over the ORs and check the inner ANDs var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool for _, o := range cs.constraints { joy := true for _, c := range o { - if !c.check(v) { - em := fmt.Errorf(c.msg, v, c.orig) - e = append(e, em) + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } } } @@ -233,12 +250,6 @@ func constraintNotEqual(v *Version, c *constraint) bool { func constraintGreaterThan(v *Version, c *constraint) bool { - // An edge case the constraint is 0.0.0 and the version is 0.0.0-someprerelease - // exists. This that case. - if !isNonZero(c.con) && isNonZero(v) { - return true - } - // If there is a pre-release on the version but the constraint isn't looking // for them assume that pre-releases are not compatible. See issue 21 for // more details. @@ -271,11 +282,6 @@ func constraintLessThan(v *Version, c *constraint) bool { } func constraintGreaterThanEqual(v *Version, c *constraint) bool { - // An edge case the constraint is 0.0.0 and the version is 0.0.0-someprerelease - // exists. This that case. 
- if !isNonZero(c.con) && isNonZero(v) { - return true - } // If there is a pre-release on the version but the constraint isn't looking // for them assume that pre-releases are not compatible. See issue 21 for @@ -415,12 +421,3 @@ func rewriteRange(i string) string { return o } - -// Detect if a version is not zero (0.0.0) -func isNonZero(v *Version) bool { - if v.Major() != 0 || v.Minor() != 0 || v.Patch() != 0 || v.Prerelease() != "" { - return true - } - - return false -} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go index e00f65eb73..6a6c24c6d6 100644 --- a/vendor/github.com/Masterminds/semver/doc.go +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -47,7 +47,7 @@ parts of the package. // Handle constraint not being parseable. } - v, _ := semver.NewVersion("1.3") + v, err := semver.NewVersion("1.3") if err != nil { // Handle version not being parseable. } diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go index 9d22ea6308..400d4f9341 100644 --- a/vendor/github.com/Masterminds/semver/version.go +++ b/vendor/github.com/Masterminds/semver/version.go @@ -34,7 +34,7 @@ const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + // ValidPrerelease is the regular expression which validates // both prerelease and metadata values. -const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)` +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` // Version represents a single semantic version. type Version struct { @@ -106,7 +106,7 @@ func MustParse(v string) *Version { // Note, if the original version contained a leading v this version will not. // See the Original() method to retrieve the original value. Semantic Versions // don't contain a leading v per the spec. Instead it's optional on -// impelementation. +// implementation. func (v *Version) String() string { var buf bytes.Buffer @@ -394,10 +394,14 @@ func comparePrePart(s, o string) int { } // When comparing strings "99" is greater than "103". To handle - // cases like this we need to detect numbers and compare them. - - oi, n1 := strconv.ParseInt(o, 10, 64) - si, n2 := strconv.ParseInt(s, 10, 64) + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have precedence over alphanum. Parsing as Uints because negative numbers + // are ignored. 
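+ // For example, the prerelease parts "99" and "103" compare as 99 < 103 once
+ // parsed, whereas a plain string comparison would put "99" after "103".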
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + // The case where both are strings compare the strings if n1 != nil && n2 != nil { diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 0000000000..b42bcd62b9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/Masterminds/sprig/.travis.yml b/vendor/github.com/Masterminds/sprig/.travis.yml index 2e7c2d68e3..b9da8b825b 100644 --- a/vendor/github.com/Masterminds/sprig/.travis.yml +++ b/vendor/github.com/Masterminds/sprig/.travis.yml @@ -3,6 +3,9 @@ language: go go: - 1.9.x - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x - tip # Setting sudo access to false will let Travis CI use containers rather than diff --git a/vendor/github.com/Masterminds/sprig/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/CHANGELOG.md index 445937138a..6a79fbde46 100644 --- a/vendor/github.com/Masterminds/sprig/CHANGELOG.md +++ b/vendor/github.com/Masterminds/sprig/CHANGELOG.md @@ -1,5 +1,134 @@ # Changelog +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list concat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
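The semver constraint and prerelease changes shown earlier can be exercised with a minimal, hedged sketch against the package's public API (`MustParse`, `NewConstraint`, `Check`, and `Validate` all appear in the vendored code above):

```
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	v := semver.MustParse("1.2.3-beta.1")

	// Without a prerelease comparator the constraint skips prereleases...
	release, _ := semver.NewConstraint(">=1.2.3")
	fmt.Println(release.Check(v)) // false

	// ...and Validate now reports the dedicated prerelease message.
	_, errs := release.Validate(v)
	fmt.Println(errs[0]) // "1.2.3-beta.1 is a prerelease version and the constraint is only looking for release versions"

	// A "-0" comparator is the documented way to include prereleases.
	pre, _ := semver.NewConstraint(">=1.2.3-0")
	fmt.Println(pre.Check(v)) // true
}
```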
+ +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. + +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + ## Release 2.15.0 (2018-04-02) ### Added diff --git a/vendor/github.com/Masterminds/sprig/README.md b/vendor/github.com/Masterminds/sprig/README.md index 25bf3d4f4b..b70569585f 100644 --- a/vendor/github.com/Masterminds/sprig/README.md +++ b/vendor/github.com/Masterminds/sprig/README.md @@ -4,21 +4,22 @@ The Go language comes with a [built-in template language](http://golang.org/pkg/text/template/), but not -very many template functions. This library provides a group of commonly +very many template functions. Sprig is a library that provides more than 100 commonly used template functions. It is inspired by the template functions found in -[Twig](http://twig.sensiolabs.org/documentation) and also in various +[Twig](http://twig.sensiolabs.org/documentation) and in various JavaScript libraries, such as [underscore.js](http://underscorejs.org/). ## Usage -Template developers can read the [Sprig function documentation](http://masterminds.github.io/sprig/) to -learn about the >100 template functions available. 
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for +detailed instructions and code snippets for the >100 template functions available. -For Go developers wishing to include Sprig as a library in their programs, -API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig), but -read on for standard usage. +**Go developers**: If you'd like to include Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig). + +For standard usage, read on. ### Load the Sprig library @@ -40,31 +41,27 @@ tpl := template.Must( ``` -### Call the functions inside of templates +### Calling the functions inside of templates By convention, all functions are lowercase. This seems to follow the Go idiom for template functions (as opposed to template methods, which are -TitleCase). - - -Example: +TitleCase). For example, this: ``` {{ "hello!" | upper | repeat 5 }} ``` -Produces: +produces this: ``` HELLO!HELLO!HELLO!HELLO!HELLO! ``` -## Principles: +## Principles Driving Our Function Selection -The following principles were used in deciding on which functions to add, and -determining how to implement them. +We followed these principles to decide which functions to add and how to implement them: -- Template functions should be used to build layout. Therefore, the following +- Use template functions to build layout. The following types of operations are within the domain of template functions: - Formatting - Layout @@ -73,7 +70,7 @@ determining how to implement them. - Template functions should not return errors unless there is no way to print a sensible value. For example, converting a string to an integer should not produce an error if conversion fails. Instead, it should display a default - value that can be displayed. + value. - Simple math is necessary for grid layouts, pagers, and so on. Complex math (anything other than arithmetic) should be done outside of templates. - Template functions only deal with the data passed into them. 
They never retrieve diff --git a/vendor/github.com/Masterminds/sprig/crypto.go b/vendor/github.com/Masterminds/sprig/crypto.go index a91c4a7045..7a418ba88d 100644 --- a/vendor/github.com/Masterminds/sprig/crypto.go +++ b/vendor/github.com/Masterminds/sprig/crypto.go @@ -2,6 +2,8 @@ package sprig import ( "bytes" + "crypto/aes" + "crypto/cipher" "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" @@ -19,6 +21,8 @@ import ( "encoding/pem" "errors" "fmt" + "io" + "hash/adler32" "math/big" "net" "time" @@ -37,6 +41,11 @@ func sha1sum(input string) string { return hex.EncodeToString(hash[:]) } +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + // uuidv4 provides a safe and secure UUID v4 implementation func uuidv4() string { return fmt.Sprintf("%s", uuid.New()) @@ -369,8 +378,13 @@ func getBaseCertTemplate( if err != nil { return nil, err } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } return &x509.Certificate{ - SerialNumber: big.NewInt(1), + SerialNumber: serialNumber, Subject: pkix.Name{ CommonName: cn, }, @@ -428,3 +442,61 @@ func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { } return alternateDNSStrs, nil } + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) 
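+ // The three lines above apply PKCS#7-style padding: the plaintext is extended
+ // to a whole number of AES blocks, and every padding byte records the pad
+ // length so that decryptAES can strip it after decrypting.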
+ + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/date.go b/vendor/github.com/Masterminds/sprig/date.go index 1c2c3653c8..d1d6155d72 100644 --- a/vendor/github.com/Masterminds/sprig/date.go +++ b/vendor/github.com/Masterminds/sprig/date.go @@ -1,6 +1,7 @@ package sprig import ( + "strconv" "time" ) @@ -28,6 +29,8 @@ func dateInZone(fmt string, date interface{}, zone string) string { t = time.Now() case time.Time: t = date + case *time.Time: + t = *date case int64: t = time.Unix(date, 0) case int: @@ -74,3 +77,7 @@ func toDate(fmt, str string) time.Time { t, _ := time.ParseInLocation(fmt, str, time.Local) return t } + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/defaults.go b/vendor/github.com/Masterminds/sprig/defaults.go index f0161317dc..ed6a8ab291 100644 --- a/vendor/github.com/Masterminds/sprig/defaults.go +++ b/vendor/github.com/Masterminds/sprig/defaults.go @@ -49,7 +49,6 @@ func empty(given interface{}) bool { case reflect.Struct: return false } - return true } // coalesce returns the first non-empty value. diff --git a/vendor/github.com/Masterminds/sprig/dict.go b/vendor/github.com/Masterminds/sprig/dict.go index 59076c0182..738405b433 100644 --- a/vendor/github.com/Masterminds/sprig/dict.go +++ b/vendor/github.com/Masterminds/sprig/dict.go @@ -1,6 +1,9 @@ package sprig -import "github.com/imdario/mergo" +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { d[key] = value @@ -86,3 +89,31 @@ func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface } return dst } + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := copystructure.Copy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} diff --git a/vendor/github.com/Masterminds/sprig/doc.go b/vendor/github.com/Masterminds/sprig/doc.go index 92ea318c7e..8f8f1d7370 100644 --- a/vendor/github.com/Masterminds/sprig/doc.go +++ b/vendor/github.com/Masterminds/sprig/doc.go @@ -14,220 +14,6 @@ Note that you should add the function map before you parse any template files. appear in the standard library. This is to make it easier to pipe arguments into functions. -Date Functions - - - date FORMAT TIME: Format a date, where a date is an integer type or a time.Time type, and - format is a time.Format formatting string. - - dateModify: Given a date, modify it with a duration: `date_modify "-1.5h" now`. If the duration doesn't - parse, it returns the time unaltered. See `time.ParseDuration` for info on duration strings. - - now: Current time.Time, for feeding into date-related functions. - - htmlDate TIME: Format a date for use in the value field of an HTML "date" form element. - - dateInZone FORMAT TIME TZ: Like date, but takes three arguments: format, timestamp, - timezone. - - htmlDateInZone TIME TZ: Like htmlDate, but takes two arguments: timestamp, - timezone. - -String Functions - - - abbrev: Truncate a string with ellipses. `abbrev 5 "hello world"` yields "he..." - - abbrevboth: Abbreviate from both sides, yielding "...lo wo..." - - trunc: Truncate a string (no suffix). `trunc 5 "Hello World"` yields "hello". - - trim: strings.TrimSpace - - trimAll: strings.Trim, but with the argument order reversed `trimAll "$" "$5.00"` or `"$5.00 | trimAll "$"` - - trimSuffix: strings.TrimSuffix, but with the argument order reversed: `trimSuffix "-" "ends-with-"` - - trimPrefix: strings.TrimPrefix, but with the argument order reversed `trimPrefix "$" "$5"` - - upper: strings.ToUpper - - lower: strings.ToLower - - nospace: Remove all space characters from a string. `nospace "h e l l o"` becomes "hello" - - title: strings.Title - - untitle: Remove title casing - - repeat: strings.Repeat, but with the arguments switched: `repeat count str`. (This simplifies common pipelines) - - substr: Given string, start, and length, return a substr. - - initials: Given a multi-word string, return the initials. `initials "Matt Butcher"` returns "MB" - - randAlphaNum: Given a length, generate a random alphanumeric sequence - - randAlpha: Given a length, generate an alphabetic string - - randAscii: Given a length, generate a random ASCII string (symbols included) - - randNumeric: Given a length, generate a string of digits. - - swapcase: SwapCase swaps the case of a string using a word based algorithm. see https://godoc.org/github.com/Masterminds/goutils#SwapCase - - shuffle: Shuffle randomizes runes in a string and returns the result. It uses default random source in `math/rand` - - snakecase: convert all upper case characters in a string to underscore format. - - camelcase: convert all lower case characters behind underscores to upper case character - - wrap: Force a line wrap at the given width. `wrap 80 "imagine a longer string"` - - wrapWith: Wrap a line at the given length, but using 'sep' instead of a newline. `wrapWith 50, "
", $html` - - contains: strings.Contains, but with the arguments switched: `contains substr str`. (This simplifies common pipelines) - - hasPrefix: strings.hasPrefix, but with the arguments switched - - hasSuffix: strings.hasSuffix, but with the arguments switched - - quote: Wrap string(s) in double quotation marks, escape the contents by adding '\' before '"'. - - squote: Wrap string(s) in double quotation marks, does not escape content. - - cat: Concatenate strings, separating them by spaces. `cat $a $b $c`. - - indent: Indent a string using space characters. `indent 4 "foo\nbar"` produces " foo\n bar" - - nindent: Indent a string using space characters and prepend a new line. `indent 4 "foo\nbar"` produces "\n foo\n bar" - - replace: Replace an old with a new in a string: `$name | replace " " "-"` - - plural: Choose singular or plural based on length: `len $fish | plural "one anchovy" "many anchovies"` - - sha256sum: Generate a hex encoded sha256 hash of the input - - toString: Convert something to a string - -String Slice Functions: - - - join: strings.Join, but as `join SEP SLICE` - - split: strings.Split, but as `split SEP STRING`. The results are returned - as a map with the indexes set to _N, where N is an integer starting from 0. - Use it like this: `{{$v := "foo/bar/baz" | split "/"}}{{$v._0}}` (Prints `foo`) - - splitList: strings.Split, but as `split SEP STRING`. The results are returned - as an array. - - toStrings: convert a list to a list of strings. 'list 1 2 3 | toStrings' produces '["1" "2" "3"]' - - sortAlpha: sort a list lexicographically. - -Integer Slice Functions: - - - until: Given an integer, returns a slice of counting integers from 0 to one - less than the given integer: `range $i, $e := until 5` - - untilStep: Given start, stop, and step, return an integer slice starting at - 'start', stopping at `stop`, and incrementing by 'step. This is the same - as Python's long-form of 'range'. - -Conversions: - - - atoi: Convert a string to an integer. 0 if the integer could not be parsed. - - int64: Convert a string or another numeric type to an int64. - - int: Convert a string or another numeric type to an int. - - float64: Convert a string or another numeric type to a float64. - -Defaults: - - - default: Give a default value. Used like this: trim " "| default "empty". - Since trim produces an empty string, the default value is returned. For - things with a length (strings, slices, maps), len(0) will trigger the default. - For numbers, the value 0 will trigger the default. For booleans, false will - trigger the default. For structs, the default is never returned (there is - no clear empty condition). For everything else, nil value triggers a default. - - empty: Return true if the given value is the zero value for its type. - Caveats: structs are always non-empty. This should match the behavior of - {{if pipeline}}, but can be used inside of a pipeline. - - coalesce: Given a list of items, return the first non-empty one. - This follows the same rules as 'empty'. '{{ coalesce .someVal 0 "hello" }}` - will return `.someVal` if set, or else return "hello". The 0 is skipped - because it is an empty value. - - compact: Return a copy of a list with all of the empty values removed. - 'list 0 1 2 "" | compact' will return '[1 2]' - - ternary: Given a value,'true | ternary "b" "c"' will return "b". - 'false | ternary "b" "c"' will return '"c"'. Similar to the JavaScript ternary - operator. 
- -OS: - - env: Resolve an environment variable - - expandenv: Expand a string through the environment - -File Paths: - - base: Return the last element of a path. https://golang.org/pkg/path#Base - - dir: Remove the last element of a path. https://golang.org/pkg/path#Dir - - clean: Clean a path to the shortest equivalent name. (e.g. remove "foo/.." - from "foo/../bar.html") https://golang.org/pkg/path#Clean - - ext: https://golang.org/pkg/path#Ext - - isAbs: https://golang.org/pkg/path#IsAbs - -Encoding: - - b64enc: Base 64 encode a string. - - b64dec: Base 64 decode a string. - -Reflection: - - - typeOf: Takes an interface and returns a string representation of the type. - For pointers, this will return a type prefixed with an asterisk(`*`). So - a pointer to type `Foo` will be `*Foo`. - - typeIs: Compares an interface with a string name, and returns true if they match. - Note that a pointer will not match a reference. For example `*Foo` will not - match `Foo`. - - typeIsLike: Compares an interface with a string name and returns true if - the interface is that `name` or that `*name`. In other words, if the given - value matches the given type or is a pointer to the given type, this returns - true. - - kindOf: Takes an interface and returns a string representation of its kind. - - kindIs: Returns true if the given string matches the kind of the given interface. - - Note: None of these can test whether or not something implements a given - interface, since doing so would require compiling the interface in ahead of - time. - -Data Structures: - - - tuple: Takes an arbitrary list of items and returns a slice of items. Its - tuple-ish properties are mainly gained through the template idiom, and not - through an API provided here. WARNING: The implementation of tuple will - change in the future. - - list: An arbitrary ordered list of items. (This is prefered over tuple.) - - dict: Takes a list of name/values and returns a map[string]interface{}. - The first parameter is converted to a string and stored as a key, the - second parameter is treated as the value. And so on, with odds as keys and - evens as values. If the function call ends with an odd, the last key will - be assigned the empty string. Non-string keys are converted to strings as - follows: []byte are converted, fmt.Stringers will have String() called. - errors will have Error() called. All others will be passed through - fmt.Sprintf("%v"). - -Lists Functions: - -These are used to manipulate lists: '{{ list 1 2 3 | reverse | first }}' - - - first: Get the first item in a 'list'. 'list 1 2 3 | first' prints '1' - - last: Get the last item in a 'list': 'list 1 2 3 | last ' prints '3' - - rest: Get all but the first item in a list: 'list 1 2 3 | rest' returns '[2 3]' - - initial: Get all but the last item in a list: 'list 1 2 3 | initial' returns '[1 2]' - - append: Add an item to the end of a list: 'append $list 4' adds '4' to the end of '$list' - - prepend: Add an item to the beginning of a list: 'prepend $list 4' puts 4 at the beginning of the list. - - reverse: Reverse the items in a list. - - uniq: Remove duplicates from a list. - - without: Return a list with the given values removed: 'without (list 1 2 3) 1' would return '[2 3]' - - has: Return 'true' if the item is found in the list: 'has "foo" $list' will return 'true' if the list contains "foo" - -Dict Functions: - -These are used to manipulate dicts. - - - set: Takes a dict, a key, and a value, and sets that key/value pair in - the dict. `set $dict $key $value`. 
For convenience, it returns the dict, - even though the dict was modified in place. - - unset: Takes a dict and a key, and deletes that key/value pair from the - dict. `unset $dict $key`. This returns the dict for convenience. - - hasKey: Takes a dict and a key, and returns boolean true if the key is in - the dict. - - pluck: Given a key and one or more maps, get all of the values for that key. - - keys: Get an array of all of the keys in one or more dicts. - - pick: Select just the given keys out of the dict, and return a new dict. - - omit: Return a dict without the given keys. - -Math Functions: - -Integer functions will convert integers of any width to `int64`. If a -string is passed in, functions will attempt to convert with -`strconv.ParseInt(s, 1064)`. If this fails, the value will be treated as 0. - - - add1: Increment an integer by 1 - - add: Sum an arbitrary number of integers - - sub: Subtract the second integer from the first - - div: Divide the first integer by the second - - mod: Module of first integer divided by second - - mul: Multiply integers - - max: Return the biggest of a series of one or more integers - - min: Return the smallest of a series of one or more integers - - biggest: DEPRECATED. Return the biggest of a series of one or more integers - -Crypto Functions: - - - genPrivateKey: Generate a private key for the given cryptosystem. If no - argument is supplied, by default it will generate a private key using - the RSA algorithm. Accepted values are `rsa`, `dsa`, and `ecdsa`. - - derivePassword: Derive a password from the given parameters according to the ["Master Password" algorithm](http://masterpasswordapp.com/algorithm.html) - Given parameters (in order) are: - `counter` (starting with 1), `password_type` (maximum, long, medium, short, basic, or pin), `password`, - `user`, and `site` - -SemVer Functions: - -These functions provide version parsing and comparisons for SemVer 2 version -strings. - - - semver: Parse a semantic version and return a Version object. - - semverCompare: Compare a SemVer range to a particular version. +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. */ package sprig diff --git a/vendor/github.com/Masterminds/sprig/functions.go b/vendor/github.com/Masterminds/sprig/functions.go index f0d1bc12c1..7b5b0af86c 100644 --- a/vendor/github.com/Masterminds/sprig/functions.go +++ b/vendor/github.com/Masterminds/sprig/functions.go @@ -5,12 +5,13 @@ import ( "html/template" "os" "path" + "reflect" "strconv" "strings" ttemplate "text/template" "time" - util "github.com/aokoli/goutils" + util "github.com/Masterminds/goutils" "github.com/huandu/xstrings" ) @@ -24,7 +25,7 @@ func FuncMap() template.FuncMap { return HtmlFuncMap() } -// HermeticTextFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. 
func HermeticTxtFuncMap() ttemplate.FuncMap { r := TxtFuncMap() for _, name := range nonhermeticFunctions { @@ -42,7 +43,7 @@ func HermeticHtmlFuncMap() template.FuncMap { return r } -// TextFuncMap returns a 'text/template'.FuncMap +// TxtFuncMap returns a 'text/template'.FuncMap func TxtFuncMap() ttemplate.FuncMap { return ttemplate.FuncMap(GenericFuncMap()) } @@ -84,6 +85,9 @@ var nonhermeticFunctions = []string{ // OS "env", "expandenv", + + // Network + "getHostByName", } var genericMap = map[string]interface{}{ @@ -100,6 +104,7 @@ var genericMap = map[string]interface{}{ "dateModify": dateModify, "ago": dateAgo, "toDate": toDate, + "unixEpoch": unixEpoch, // Strings "abbrev": abbrev, @@ -129,28 +134,31 @@ var genericMap = map[string]interface{}{ "shuffle": xstrings.Shuffle, "snakecase": xstrings.ToSnakeCase, "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, "wrap": func(l int, s string) string { return util.Wrap(s, l) }, "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, // Switch order so that "foobar" | contains "foo" - "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, - "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, - "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, - "quote": quote, - "squote": squote, - "cat": cat, - "indent": indent, - "nindent": nindent, - "replace": replace, - "plural": plural, - "sha1sum": sha1sum, - "sha256sum": sha256sum, - "toString": strval, + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, // Wrap Atoi to stop errors. 
- "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, - "int64": toInt64, - "int": toInt, - "float64": toFloat64, + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "toDecimal": toDecimal, //"gt": func(a, b int) bool {return a > b}, //"gte": func(a, b int) bool {return a >= b}, @@ -160,6 +168,8 @@ var genericMap = map[string]interface{}{ // split "/" foo/bar returns map[int]string{0: foo, 1: bar} "split": split, "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, "toStrings": strslice, "until": until, @@ -201,6 +211,7 @@ var genericMap = map[string]interface{}{ "empty": empty, "coalesce": coalesce, "compact": compact, + "deepCopy": deepCopy, "toJson": toJson, "toPrettyJson": toPrettyJson, "ternary": ternary, @@ -211,11 +222,15 @@ var genericMap = map[string]interface{}{ "typeIsLike": typeIsLike, "kindOf": kindOf, "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, // OS: "env": func(s string) string { return os.Getenv(s) }, "expandenv": func(s string) string { return os.ExpandEnv(s) }, + // Network: + "getHostByName": getHostByName, + // File Paths: "base": path.Base, "dir": path.Dir, @@ -230,17 +245,19 @@ var genericMap = map[string]interface{}{ "b32dec": base32decode, // Data Structures: - "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. - "list": list, - "dict": dict, - "set": set, - "unset": unset, - "hasKey": hasKey, - "pluck": pluck, - "keys": keys, - "pick": pick, - "omit": omit, - "merge": merge, + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "values": values, "append": push, "push": push, "prepend": prepend, @@ -252,6 +269,8 @@ var genericMap = map[string]interface{}{ "uniq": uniq, "without": without, "has": has, + "slice": slice, + "concat": concat, // Crypto: "genPrivateKey": generatePrivateKey, @@ -260,6 +279,8 @@ var genericMap = map[string]interface{}{ "genCA": generateCertificateAuthority, "genSelfSignedCert": generateSelfSignedCertificate, "genSignedCert": generateSignedCertificate, + "encryptAES": encryptAES, + "decryptAES": decryptAES, // UUIDs: "uuidv4": uuidv4, @@ -278,4 +299,8 @@ var genericMap = map[string]interface{}{ "regexReplaceAll": regexReplaceAll, "regexReplaceAllLiteral": regexReplaceAllLiteral, "regexSplit": regexSplit, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, } diff --git a/vendor/github.com/Masterminds/sprig/glide.lock b/vendor/github.com/Masterminds/sprig/glide.lock deleted file mode 100644 index 34afeb9c37..0000000000 --- a/vendor/github.com/Masterminds/sprig/glide.lock +++ /dev/null @@ -1,33 +0,0 @@ -hash: 770b6a1132b743dadf6a0bb5fb8bf7083b1a5209f6d6c07826234ab2a97aade9 -updated: 2018-04-02T23:08:56.947456531+02:00 -imports: -- name: github.com/aokoli/goutils - version: 9c37978a95bd5c709a15883b6242714ea6709e64 -- name: github.com/google/uuid - version: 064e2069ce9c359c118179501254f67d7d37ba24 -- name: github.com/huandu/xstrings - version: 3959339b333561bf62a38b424fd41517c2c90f40 -- name: github.com/imdario/mergo - version: 7fe0c75c13abdee74b09fcacef5ea1c6bba6a874 -- name: github.com/Masterminds/goutils - version: 3391d3790d23d03408670993e957e8f408993c34 -- name: github.com/Masterminds/semver - version: 59c29afe1a994eacb71c833025ca7acf874bb1da -- name: github.com/stretchr/testify - version: e3a8ff8ce36581f87a15341206f205b1da467059 - subpackages: - - assert -- name: golang.org/x/crypto - version: d172538b2cfce0c13cee31e647d0367aa8cd2486 - subpackages: - - pbkdf2 - - scrypt -testImports: -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib diff --git a/vendor/github.com/Masterminds/sprig/glide.yaml b/vendor/github.com/Masterminds/sprig/glide.yaml index 772ba91344..f317d2b2b1 100644 --- a/vendor/github.com/Masterminds/sprig/glide.yaml +++ b/vendor/github.com/Masterminds/sprig/glide.yaml @@ -3,13 +3,17 @@ import: - package: github.com/Masterminds/goutils version: ^1.0.0 - package: github.com/google/uuid - version: ^0.2 + version: ^1.0.0 - package: golang.org/x/crypto subpackages: - scrypt - package: github.com/Masterminds/semver - version: v1.2.2 + version: ^v1.2.2 - package: github.com/stretchr/testify + version: ^v1.2.2 - package: github.com/imdario/mergo - version: ~0.2.2 + version: ~0.3.7 - package: github.com/huandu/xstrings + version: ^1.2 +- package: github.com/mitchellh/copystructure + version: ^1.0.0 diff --git a/vendor/github.com/Masterminds/sprig/list.go b/vendor/github.com/Masterminds/sprig/list.go index 1860549a94..c0381bbb65 100644 --- a/vendor/github.com/Masterminds/sprig/list.go +++ b/vendor/github.com/Masterminds/sprig/list.go @@ -239,6 +239,9 @@ func without(list interface{}, omit ...interface{}) []interface{} { } func has(needle interface{}, haystack interface{}) bool { + if haystack == nil { 
+ return false + } tp := reflect.TypeOf(haystack).Kind() switch tp { case reflect.Slice, reflect.Array: @@ -257,3 +260,52 @@ func has(needle interface{}, haystack interface{}) bool { panic(fmt.Sprintf("Cannot find has on type %s", tp)) } } + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface() + default: + panic(fmt.Sprintf("list should be type of slice or array but %s", tp)) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/network.go b/vendor/github.com/Masterminds/sprig/network.go new file mode 100644 index 0000000000..d786cc7363 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handling when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/numeric.go b/vendor/github.com/Masterminds/sprig/numeric.go index 209c62e53a..f4af4af2a7 100644 --- a/vendor/github.com/Masterminds/sprig/numeric.go +++ b/vendor/github.com/Masterminds/sprig/numeric.go @@ -1,6 +1,7 @@ package sprig import ( + "fmt" "math" "reflect" "strconv" @@ -156,4 +157,13 @@ func round(a interface{}, p int, r_opt ...float64) float64 { round = math.Floor(digit) } return round / pow -} \ No newline at end of file +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} diff --git a/vendor/github.com/Masterminds/sprig/regex.go b/vendor/github.com/Masterminds/sprig/regex.go index 9fe033a6bd..2016f66336 100644 --- a/vendor/github.com/Masterminds/sprig/regex.go +++ b/vendor/github.com/Masterminds/sprig/regex.go @@ -32,4 +32,4 @@ func regexReplaceAllLiteral(regex string, s string, repl string) string { func regexSplit(regex string, s string, n int) []string { r := regexp.MustCompile(regex) return r.Split(s, n) -} \ No newline at end of file +} diff --git a/vendor/github.com/Masterminds/sprig/strings.go b/vendor/github.com/Masterminds/sprig/strings.go index f6afa2ff9e..943fa3e8ad 100644 --- a/vendor/github.com/Masterminds/sprig/strings.go +++ b/vendor/github.com/Masterminds/sprig/strings.go @@ -8,7 +8,7 @@ import ( "strconv" "strings" - util "github.com/aokoli/goutils" + util "github.com/Masterminds/goutils" ) func base64encode(v string) string { @@ -57,22 +57,22 @@ func initials(s string) string { func randAlphaNumeric(count int) string { // It is not possible, it appears, to actually generate an error here.
- r, _ := util.RandomAlphaNumeric(count) + r, _ := util.CryptoRandomAlphaNumeric(count) return r } func randAlpha(count int) string { - r, _ := util.RandomAlphabetic(count) + r, _ := util.CryptoRandomAlphabetic(count) return r } func randAscii(count int) string { - r, _ := util.RandomAscii(count) + r, _ := util.CryptoRandomAscii(count) return r } func randNumeric(count int) string { - r, _ := util.RandomNumeric(count) + r, _ := util.CryptoRandomNumeric(count) return r } @@ -81,22 +81,27 @@ func untitle(str string) string { } func quote(str ...interface{}) string { - out := make([]string, len(str)) - for i, s := range str { - out[i] = fmt.Sprintf("%q", strval(s)) + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } } return strings.Join(out, " ") } func squote(str ...interface{}) string { - out := make([]string, len(str)) - for i, s := range str { - out[i] = fmt.Sprintf("'%v'", s) + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } } return strings.Join(out, " ") } func cat(v ...interface{}) string { + v = removeNilElements(v) r := strings.TrimSpace(strings.Repeat("%v ", len(v))) return fmt.Sprintf(r, v...) } @@ -126,10 +131,11 @@ func strslice(v interface{}) []string { case []string: return v case []interface{}: - l := len(v) - b := make([]string, l) - for i := 0; i < l; i++ { - b[i] = strval(v[i]) + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } } return b default: @@ -137,15 +143,32 @@ func strslice(v interface{}) []string { switch val.Kind() { case reflect.Array, reflect.Slice: l := val.Len() - b := make([]string, l) + b := make([]string, 0, l) for i := 0; i < l; i++ { - b[i] = strval(val.Index(i).Interface()) + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } } return b default: - return []string{strval(v)} + if v == nil { + return []string{} + } else { + return []string{strval(v)} + } + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) } } + return newSlice } func strval(v interface{}) string { @@ -183,19 +206,28 @@ func split(sep, orig string) map[string]string { return res } +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + // substring creates a substring of the given string. // -// If start is < 0, this calls string[:length]. +// If start is < 0, this calls string[:end]. // -// If start is >= 0 and length < 0, this calls string[start:] +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] // -// Otherwise, this calls string[start, length]. -func substring(start, length int, s string) string { +// Otherwise, this calls string[start, end]. 
+func substring(start, end int, s string) string { if start < 0 { - return s[:length] + return s[:end] } - if length < 0 { + if end < 0 || end > len(s) { return s[start:] } - return s[start:length] + return s[start:end] } diff --git a/vendor/github.com/Masterminds/sprig/url.go b/vendor/github.com/Masterminds/sprig/url.go new file mode 100644 index 0000000000..5f22d801f9 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key]; if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedUrl, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedUrl.Scheme + dict["host"] = parsedUrl.Host + dict["hostname"] = parsedUrl.Hostname() + dict["path"] = parsedUrl.Path + dict["query"] = parsedUrl.RawQuery + dict["opaque"] = parsedUrl.Opaque + dict["fragment"] = parsedUrl.Fragment + if parsedUrl.User != nil { + dict["userinfo"] = parsedUrl.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resUrl := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo = nil + if userinfo != "" { + tempUrl, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempUrl.User + } + + resUrl.User = user + return resUrl.String() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS new file mode 100644 index 0000000000..2b00ddba0d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS new file mode 100644 index 0000000000..1fbd3e976f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go new file mode 100644 index 0000000000..3ed3f43573 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go @@ -0,0 +1,381 @@ +package bitcurves + +// Copyright 2010 The Go Authors. All rights reserved. +// Copyright 2011 ThePiachu. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
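// Editor's sketch (not part of the vendored file): typical use of this package
// is to generate a key pair on one of the SEC 2 curves and round-trip the
// public point through Marshal/Unmarshal; crypto/rand.Reader stands in for any
// CSPRNG here:
//
//	curve := S256()
//	_, x, y, err := curve.GenerateKey(rand.Reader)
//	enc := curve.Marshal(x, y)     // 0x04 || X || Y, per ANSI X9.62 section 4.3.6
//	x2, y2 := curve.Unmarshal(enc) // x2 == nil signals a malformed encoding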
+ +// Package bitcurves implements several Koblitz elliptic curves over prime +// fields. + +// This package operates, internally, on Jacobian coordinates. For a given +// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) +// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole +// calculation can be performed within the transform (as in ScalarMult and +// ScalarBaseMult). But even for Add and Double, it's faster to apply and +// reverse the transform than to operate in affine coordinates. + +import ( + "crypto/elliptic" + "io" + "math/big" + "sync" +) + +// A BitCurve represents a Koblitz Curve with a=0. +// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html +type BitCurve struct { + Name string + P *big.Int // the order of the underlying field + N *big.Int // the order of the base point + B *big.Int // the constant of the BitCurve equation + Gx, Gy *big.Int // (x,y) of the base point + BitSize int // the size of the underlying field +} + +// Params returns the parameters of the given BitCurve (see BitCurve struct) +func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) { + cp = new(elliptic.CurveParams) + cp.Name = bitCurve.Name + cp.P = bitCurve.P + cp.N = bitCurve.N + cp.Gx = bitCurve.Gx + cp.Gy = bitCurve.Gy + cp.BitSize = bitCurve.BitSize + return cp +} + +// IsOnCurve returns true if the given (x,y) lies on the BitCurve. +func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { + // y² = x³ + b + y2 := new(big.Int).Mul(y, y) //y² + y2.Mod(y2, bitCurve.P) //y²%P + + x3 := new(big.Int).Mul(x, x) //x² + x3.Mul(x3, x) //x³ + + x3.Add(x3, bitCurve.B) //x³+B + x3.Mod(x3, bitCurve.P) //(x³+B)%P + + return x3.Cmp(y2) == 0 +} + +// affineFromJacobian reverses the Jacobian transform. See the comment at the +// top of the file. +func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { + if z.Cmp(big.NewInt(0)) == 0 { + panic("bitcurve: Can't convert to affine with Jacobian Z = 0") + } + // x = X / Z² mod P + zinv := new(big.Int).ModInverse(z, bitCurve.P) + zinvsq := new(big.Int).Mul(zinv, zinv) + + xOut = new(big.Int).Mul(x, zinvsq) + xOut.Mod(xOut, bitCurve.P) + // y = Y / Z³ mod P + zinvsq.Mul(zinvsq, zinv) + yOut = new(big.Int).Mul(y, zinvsq) + yOut.Mod(yOut, bitCurve.P) + return xOut, yOut +} + +// Add returns the sum of (x1,y1) and (x2,y2) +func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { + z := new(big.Int).SetInt64(1) + x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z) + return bitCurve.affineFromJacobian(x, y, z) +} + +// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and +// (x2, y2, z2) and returns their sum, also in Jacobian form.
+func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl + z1z1 := new(big.Int).Mul(z1, z1) + z1z1.Mod(z1z1, bitCurve.P) + z2z2 := new(big.Int).Mul(z2, z2) + z2z2.Mod(z2z2, bitCurve.P) + + u1 := new(big.Int).Mul(x1, z2z2) + u1.Mod(u1, bitCurve.P) + u2 := new(big.Int).Mul(x2, z1z1) + u2.Mod(u2, bitCurve.P) + h := new(big.Int).Sub(u2, u1) + if h.Sign() == -1 { + h.Add(h, bitCurve.P) + } + i := new(big.Int).Lsh(h, 1) + i.Mul(i, i) + j := new(big.Int).Mul(h, i) + + s1 := new(big.Int).Mul(y1, z2) + s1.Mul(s1, z2z2) + s1.Mod(s1, bitCurve.P) + s2 := new(big.Int).Mul(y2, z1) + s2.Mul(s2, z1z1) + s2.Mod(s2, bitCurve.P) + r := new(big.Int).Sub(s2, s1) + if r.Sign() == -1 { + r.Add(r, bitCurve.P) + } + r.Lsh(r, 1) + v := new(big.Int).Mul(u1, i) + + x3 := new(big.Int).Set(r) + x3.Mul(x3, x3) + x3.Sub(x3, j) + x3.Sub(x3, v) + x3.Sub(x3, v) + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Set(r) + v.Sub(v, x3) + y3.Mul(y3, v) + s1.Mul(s1, j) + s1.Lsh(s1, 1) + y3.Sub(y3, s1) + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Add(z1, z2) + z3.Mul(z3, z3) + z3.Sub(z3, z1z1) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Sub(z3, z2z2) + if z3.Sign() == -1 { + z3.Add(z3, bitCurve.P) + } + z3.Mul(z3, h) + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +// Double returns 2*(x,y) +func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { + z1 := new(big.Int).SetInt64(1) + return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) +} + +// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and +// returns its double, also in Jacobian form. +func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { + // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l + + a := new(big.Int).Mul(x, x) //X1² + b := new(big.Int).Mul(y, y) //Y1² + c := new(big.Int).Mul(b, b) //B² + + d := new(big.Int).Add(x, b) //X1+B + d.Mul(d, d) //(X1+B)² + d.Sub(d, a) //(X1+B)²-A + d.Sub(d, c) //(X1+B)²-A-C + d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) + + e := new(big.Int).Mul(big.NewInt(3), a) //3*A + f := new(big.Int).Mul(e, e) //E² + + x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D + x3.Sub(f, x3) //F-2*D + x3.Mod(x3, bitCurve.P) + + y3 := new(big.Int).Sub(d, x3) //D-X3 + y3.Mul(e, y3) //E*(D-X3) + y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C + y3.Mod(y3, bitCurve.P) + + z3 := new(big.Int).Mul(y, z) //Y1*Z1 + z3.Mul(big.NewInt(2), z3) //2*Y1*Z1 + z3.Mod(z3, bitCurve.P) + + return x3, y3, z3 +} + +//TODO: double-check if it is okay +// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. +func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { + // We have a slight problem in that the identity of the group (the + // point at infinity) cannot be represented in (x, y) form on a finite + // machine. Thus the standard add/double algorithm has to be tweaked + // slightly: our initial state is not the identity, but x, and we + // ignore the first true bit in |k|. If we don't find any true bits in + // |k|, then we return nil, nil, because we cannot return the identity + // element.
+ + Bz := new(big.Int).SetInt64(1) + x := Bx + y := By + z := Bz + + seenFirstTrue := false + for _, byte := range k { + for bitNum := 0; bitNum < 8; bitNum++ { + if seenFirstTrue { + x, y, z = bitCurve.doubleJacobian(x, y, z) + } + if byte&0x80 == 0x80 { + if !seenFirstTrue { + seenFirstTrue = true + } else { + x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) + } + } + byte <<= 1 + } + } + + if !seenFirstTrue { + return nil, nil + } + + return bitCurve.affineFromJacobian(x, y, z) +} + +// ScalarBaseMult returns k*G, where G is the base point of the group and k is +// an integer in big-endian form. +func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { + return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) +} + +var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} + +//TODO: double check if it is okay +// GenerateKey returns a public/private key pair. The private key is generated +// using the given reader, which must return random data. +func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { + byteLen := (bitCurve.BitSize + 7) >> 3 + priv = make([]byte, byteLen) + + for x == nil { + _, err = io.ReadFull(rand, priv) + if err != nil { + return + } + // We have to mask off any excess bits in the case that the size of the + // underlying field is not a whole number of bytes. + priv[0] &= mask[bitCurve.BitSize%8] + // This is because, in tests, rand will return all zeros and we don't + // want to get the point at infinity and loop forever. + priv[1] ^= 0x42 + x, y = bitCurve.ScalarBaseMult(priv) + } + return +} + +// Marshal converts a point into the form specified in section 4.3.6 of ANSI +// X9.62. +func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 + + ret := make([]byte, 1+2*byteLen) + ret[0] = 4 // uncompressed point + + xBytes := x.Bytes() + copy(ret[1+byteLen-len(xBytes):], xBytes) + yBytes := y.Bytes() + copy(ret[1+2*byteLen-len(yBytes):], yBytes) + return ret +} + +// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On +// error, x = nil. 
+func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 + if len(data) != 1+2*byteLen { + return + } + if data[0] != 4 { // uncompressed form + return + } + x = new(big.Int).SetBytes(data[1 : 1+byteLen]) + y = new(big.Int).SetBytes(data[1+byteLen:]) + return +} + +//curve parameters taken from: +//http://www.secg.org/collateral/sec2_final.pdf + +var initonce sync.Once +var secp160k1 *BitCurve +var secp192k1 *BitCurve +var secp224k1 *BitCurve +var secp256k1 *BitCurve + +func initAll() { + initS160() + initS192() + initS224() + initS256() +} + +func initS160() { + // See SEC 2 section 2.4.1 + secp160k1 = new(BitCurve) + secp160k1.Name = "secp160k1" + secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) + secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) + secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) + secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) + secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) + secp160k1.BitSize = 160 +} + +func initS192() { + // See SEC 2 section 2.5.1 + secp192k1 = new(BitCurve) + secp192k1.Name = "secp192k1" + secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) + secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) + secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) + secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) + secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) + secp192k1.BitSize = 192 +} + +func initS224() { + // See SEC 2 section 2.6.1 + secp224k1 = new(BitCurve) + secp224k1.Name = "secp224k1" + secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) + secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) + secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) + secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) + secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) + secp224k1.BitSize = 224 +} + +func initS256() { + // See SEC 2 section 2.7.1 + secp256k1 = new(BitCurve) + secp256k1.Name = "secp256k1" + secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) + secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) + secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) + secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) + secp256k1.BitSize = 256 +} + +// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) +func S160() *BitCurve { + initonce.Do(initAll) + return secp160k1 +} + +// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) +func S192() *BitCurve { + initonce.Do(initAll) + return secp192k1 +} + +// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1) +func S224() *BitCurve { + initonce.Do(initAll) + return secp224k1 +} + +// S256 returns a BitCurve which implements secp256k1 (see SEC 2 section 2.7.1) +func S256() *BitCurve { + initonce.Do(initAll) + return secp256k1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go new file mode 100644 index 0000000000..77fb8b9a04 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go @@ -0,0 +1,134 @@ +// Package brainpool implements Brainpool elliptic curves. +// Implementation of rcurves is from github.com/ebfe/brainpool +// Note that these curves are implemented with naive, non-constant-time operations +// and are likely not suitable for environments where timing attacks are a concern. +package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ =
new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go new file mode 100644 index 0000000000..2d5355085f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + 
+var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go new file mode 100644 index 0000000000..6b6bc7aed0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package eax provides an implementation of the EAX +// (encrypt-authenticate-translate) mode of operation, as described in +// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS +// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." +// In FSE'04, volume 3017 of LNCS, 2004 +package eax + +import ( + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +const ( + defaultTagSize = 16 + defaultNonceSize = 16 +) + +type eax struct { + block cipher.Block // Only AES-{128, 192, 256} supported + tagSize int // At least 12 bytes recommended + nonceSize int +} + +func (e *eax) NonceSize() int { + return e.nonceSize +} + +func (e *eax) Overhead() int { + return e.tagSize +} + +// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and +// tag lengths. Supports {128, 192, 256}-bit key lengths. +func NewEAX(block cipher.Block) (cipher.AEAD, error) { + return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and +// given nonce and tag lengths in bytes. Panics on zero nonceSize and +// exceedingly long tags. +// +// It is recommended to use at least 12 bytes as tag length (see, for instance, +// NIST SP 800-38D). +// +// Only to be used for compatibility with existing cryptosystems with +// non-standard parameters.
For all other cases, prefer NewEAX. +func NewEAXWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if nonceSize < 1 { + return nil, eaxError("Cannot initialize EAX with nonceSize = 0") + } + if tagSize > block.BlockSize() { + return nil, eaxError("Custom tag length exceeds blocksize") + } + return &eax{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + }, nil +} + +func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext) + e.tagSize) + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + + // Encrypt message using CTR mode and omacNonce as IV + ctr := cipher.NewCTR(e.block, omacNonce) + ciphertextData := out[:len(plaintext)] + ctr.XORKeyStream(ciphertextData, plaintext) + + omacCiphertext := e.omacT(2, ciphertextData) + + tag := out[len(plaintext):] + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + return ret +} + +func (e* eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + if len(ciphertext) < e.tagSize { + return nil, eaxError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - e.tagSize + + // Compute tag + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + omacCiphertext := e.omacT(2, ciphertext[:sep]) + + tag := make([]byte, e.tagSize) + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + + // Compare tags + if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { + return nil, eaxError("Tag authentication failed") + } + + // Decrypt ciphertext + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ctr := cipher.NewCTR(e.block, omacNonce) + ctr.XORKeyStream(out, ciphertext[:sep]) + + return ret[:sep], nil +} + +// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) +func (e *eax) omacT(t byte, plaintext []byte) []byte { + blockSize := e.block.BlockSize() + byteT := make([]byte, blockSize) + byteT[blockSize-1] = t + concat := append(byteT, plaintext...) + return e.omac(concat) +} + +func (e *eax) omac(plaintext []byte) []byte { + blockSize := e.block.BlockSize() + // L ← E_K(0^n); B ← 2L; P ← 4L + L := make([]byte, blockSize) + e.block.Encrypt(L, L) + B := byteutil.GfnDouble(L) + P := byteutil.GfnDouble(B) + + // CBC with IV = 0 + cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) + padded := e.pad(plaintext, B, P) + cbcCiphertext := make([]byte, len(padded)) + cbc.CryptBlocks(cbcCiphertext, padded) + + return cbcCiphertext[len(cbcCiphertext)-blockSize:] +} + +func (e *eax) pad(plaintext, B, P []byte) []byte { + // if |M| in {n, 2n, 3n, ...} + blockSize := e.block.BlockSize() + if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { + return byteutil.RightXor(plaintext, B) + } + + // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P + ending := make([]byte, blockSize-len(plaintext)%blockSize) + ending[0] = 0x80 + padded := append(plaintext, ending...) 
+ return byteutil.RightXor(padded, P) +} + +func eaxError(err string) error { + return errors.New("crypto/eax: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go new file mode 100644 index 0000000000..ddb53d0790 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go @@ -0,0 +1,58 @@ +package eax + +// Test vectors from +// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf +var testVectors = []struct { + msg, key, nonce, header, ciphertext string +}{ + {"", + "233952DEE4D5ED5F9B9C6D6FF80FF478", + "62EC67F9C3A4A407FCB2A8C49031A8B3", + "6BFB914FD07EAE6B", + "E037830E8389F27B025A2D6527E79D01"}, + {"F7FB", + "91945D3F4DCBEE0BF45EF52255F095A4", + "BECAF043B0A23D843194BA972C66DEBD", + "FA3BFD4806EB53FA", + "19DD5C4C9331049D0BDAB0277408F67967E5"}, + {"1A47CB4933", + "01F74AD64077F2E704C0F60ADA3DD523", + "70C3DB4F0D26368400A10ED05D2BFF5E", + "234A3463C1264AC6", + "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, + {"481C9E39B1", + "D07CF6CBB7F313BDDE66B727AFD3C5E8", + "8408DFFF3C1A2B1292DC199E46B7D617", + "33CCE2EABFF5A79D", + "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, + {"40D0C07DA5E4", + "35B6D0580005BBC12B0587124557D2C2", + "FDB6B06676EEDC5C61D74276E1F8E816", + "AEB96EAEBE2970E9", + "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, + {"4DE3B35C3FC039245BD1FB7D", + "BD8E6E11475E60B268784C38C62FEB22", + "6EAC5C93072D8E8513F750935E46DA1B", + "D4482D1CA78DCE0F", + "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, + {"8B0A79306C9CE7ED99DAE4F87F8DD61636", + "7C77D6E813BED5AC98BAA417477A2E7D", + "1A8C98DCD73D38393B2BF1569DEEFC19", + "65D2017990D62528", + "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, + {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", + "5FFF20CAFAB119CA2FC73549E20F5B0D", + "DDE59B97D722156D4D9AFF2BC7559826", + "54B9F04E6A09189A", + "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, + {"6CF36720872B8513F6EAB1A8A44438D5EF11", + "A4A4782BCFFD3EC5E7EF6D8C34A56123", + "B781FCF2F75FA5A8DE97A9CA48E522EC", + "899A175897561D7E", + "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, + {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", + "8395FCF1E95BEBD697BD010BC766AAC3", + "22E7ADD93CFC6393C57EC0B3C17D6B44", + "126735FCC320D25A", + "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go new file mode 100644 index 0000000000..4eb19f28d9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go @@ -0,0 +1,131 @@ +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
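// Editor's sketch (not part of the vendored file): each vector below can be
// replayed through the cipher.AEAD interface implemented in eax.go; with the
// defaults, both the nonce and the tag are 16 bytes:
//
//	block, _ := aes.NewCipher(key)               // AES-128/192/256 key
//	aead, _ := eax.NewEAX(block)
//	ct := aead.Seal(nil, nonce, msg, header)     // returns ciphertext || tag
//	pt, err := aead.Open(nil, nonce, ct, header) // err != nil on tag mismatch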
+ +package eax + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + {"DFDE093F36B0356E5A81F609786982E3", + "1D8AC604419001816905BA72B14CED7E", + "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", + "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", + "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, + {"F10619EF02E5D94D7550EB84ED364A21", + "8DC0D4F2F745BBAE835CC5574B942D20", + "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", + "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", + "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, + {"429F514EFC64D98A698A9247274CFF45", + "976AA5EB072F912D126ACEBC954FEC38", + "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", + "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", + "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, + {"398138F309085F47F8457CDF53895A63", + "F8A8A7F2D28E5FFF7BBC2F24353F7A36", + "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", + "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", + "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, + {"7A4151EBD3901B42CBA45DAFB2E931BA", + "0FC88ACEE74DD538040321C330974EB8", + "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", + "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", + "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, + {"BFB147E1CD5459424F8C0271FC0E0DC5", + "EABCC126442BF373969EA3015988CC45", + "4C0880E1D71AA2C7", + "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", + "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, + {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", + "0F84B5D36CF4BC3B863313AF3B4D2E97", + "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", + "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", + "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, + {"44E6F2DC8FDC778AD007137D11410F50", + "270A237AD977F7187AA6C158A0BAB24F", + "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", + 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", + "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, + {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", + "8B4BBE26CCD9859DCD84884159D6B0A4", + "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", + "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", + "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, + {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", + "92745C018AA708ECFEB1667E9F3F1B01", + "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", + "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", + "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, + {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", + "0F45CF7F0BF31CCEB85D9DA10F4D749F", + "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", + "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", + "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, + {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", + "5B11EA1D6008EBB41CF892FCA5B943D1", + "BAF4FF5C8242", + "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", + "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, + {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", + "FD82B5B31804FF47D44199B533D0CF84", + "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", + "B804", + "164BB965C05EBE0931A1A63293EDF9C38C27"}, + {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", + "343BE00DA9483F05C14F2E9EB8EA6AE8", + "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", + "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", + 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, + {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", + "E280E13D7367042E3AA09A80111B6184", + "21486C9D7A9647", + "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", + "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, + {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", + "656B41CB3F9CF8C08BAD7EBFC80BD225", + "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", + "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", + "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, + {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", + "DA0F3A40983D92F2D4C01FED33C7A192", + "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", + "F37326A80E08", + "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, + {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", + "3EE1182AEBB19A02B128F28E1D5F7F99", + "D9F35ABB16D776CE", + "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", + "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, + {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", + "336F5D681E0410FAE7B607246092C6DC", + "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", + "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", + "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, + {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", + "AB91F7245DDCE3F1C747872D47BE0A8A", + "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", + "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", + "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, + {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", + "C691294EF67CD04D1B9242AF83DD1421", + "879334DAE3", + "1E17F46A98FEF5CBB40759D95354", + "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, + {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", + "4228DA0678CA3534588859E77DFF014C", + "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", + 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", + "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, + {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", + "943188480C99437495958B0AE4831AA9", + "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", + "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", + "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, + {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", + "B0490F5BD68A52459556B3749ACDF40E", + "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", + "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", + "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go new file mode 100644 index 0000000000..a6bdf51232 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019 ProtonTech AG +// This file contains necessary tools for the aex and ocb packages. +// +// These functions SHOULD NOT be used elsewhere, since they are optimized for +// specific input nature in the EAX and OCB modes of operation. + +package byteutil + +// GfnDouble computes 2 * input in the field of 2^n elements. +// The irreducible polynomial in the finite field for n=128 is +// x^128 + x^7 + x^2 + x + 1 (equals 0x87) +// Constant-time execution in order to avoid side-channel attacks +func GfnDouble(input []byte) []byte { + if len(input) != 16 { + panic("Doubling in GFn only implemented for n = 128") + } + // If the first bit is zero, return 2L = L << 1 + // Else return (L << 1) xor 0^120 10000111 + shifted := ShiftBytesLeft(input) + shifted[15] ^= ((input[0] >> 7) * 0x87) + return shifted +} + +// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. +func ShiftBytesLeft(x []byte) []byte { + l := len(x) + dst := make([]byte, l) + for i := 0; i < l-1; i++ { + dst[i] = (x[i] << 1) | (x[i+1] >> 7) + } + dst[l-1] = x[l-1] << 1 + return dst +} + +// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. +func ShiftNBytesLeft(dst, x []byte, n int) { + // Erase first n / 8 bytes + copy(dst, x[n/8:]) + + // Shift the remaining n % 8 bits + bits := uint(n % 8) + l := len(dst) + for i := 0; i < l-1; i++ { + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits)) + } + dst[l-1] = dst[l-1] << bits + + // Append trailing zeroes + dst = append(dst, make([]byte, n/8)...) 
+} + +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { + X[i] ^= Y[i] + } +} + + +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { + Z[i] = X[i] ^ Y[i] + } +} + +// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) +func RightXor(X, Y []byte) []byte { + offset := len(X) - len(Y) + xored := make([]byte, len(X)); + copy(xored, X) + for i := 0; i < len(Y); i++ { + xored[offset + i] ^= Y[i] + } + return xored +} + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go new file mode 100644 index 0000000000..7f78cfa759 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -0,0 +1,317 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package ocb provides an implementation of the OCB (offset codebook) mode of +// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, +// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT +// AUTHENTICATED ENCRYPTION (2003). +// Security considerations (from RFC-7253): A private key MUST NOT be used to +// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a +// brute-force forging adversary succeeds after 2^{tag length} attempts). A +// single key SHOULD NOT be used to decrypt ciphertext with different tag +// lengths. Nonces need not be secret, but MUST NOT be reused. +// This package only supports underlying block ciphers with 128-bit blocks, +// such as AES-{128, 192, 256}, but may be extended to other sizes. +package ocb + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" + "math/bits" +) + +type ocb struct { + block cipher.Block + tagSize int + nonceSize int + mask mask + // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' + // internal variable can be reused for en/decrypting with nonces sharing + // all but the last 6 bits with N. The prefix of the first nonce used to + // compute the new Ktop, and the Ktop value itself, are stored in + // reusableKtop. If using incremental nonces, this saves one block cipher + // call every 63 out of 64 OCB encryptions, and stores one nonce and one + // output of the block cipher in memory only. + reusableKtop reusableKtop +} + +type mask struct { + // L_*, L_$, (L_i)_{i ∈ N} + lAst []byte + lDol []byte + L [][]byte +} + +type reusableKtop struct { + noncePrefix []byte + Ktop []byte +} + +const ( + defaultTagSize = 16 + defaultNonceSize = 15 +) + +const ( + enc = iota + dec +) + +func (o *ocb) NonceSize() int { + return o.nonceSize +} + +func (o *ocb) Overhead() int { + return o.tagSize +} + +// NewOCB returns an OCB instance with the given block cipher and default +// tag and nonce sizes. 
+func NewOCB(block cipher.Block) (cipher.AEAD, error) { + return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewOCBWithNonceAndTagSize returns an OCB instance with the given block +// cipher, nonce length, and tag length. Panics on zero nonceSize and +// exceedingly long tag size. +// +// It is recommended to use at least 12 bytes as tag length. +func NewOCBWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if block.BlockSize() != 16 { + return nil, ocbError("Block cipher must have 128-bit blocks") + } + if nonceSize < 1 { + return nil, ocbError("Incorrect nonce length") + } + if nonceSize >= block.BlockSize() { + return nil, ocbError("Nonce length exceeds blocksize - 1") + } + if tagSize > block.BlockSize() { + return nil, ocbError("Custom tag length exceeds blocksize") + } + return &ocb{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + mask: initializeMaskTable(block), + reusableKtop: reusableKtop{ + noncePrefix: nil, + Ktop: nil, + }, + }, nil +} + +func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > o.nonceSize { + panic("crypto/ocb: Incorrect nonce length given to OCB") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) + o.crypt(enc, out, nonce, adata, plaintext) + return ret +} + +func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > o.nonceSize { + panic("Nonce too long for this instance") + } + if len(ciphertext) < o.tagSize { + return nil, ocbError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - o.tagSize + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ciphertextData := ciphertext[:sep] + tag := ciphertext[sep:] + o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { + ret = ret[:sep] + return ret, nil + } + for i := range out { + out[i] = 0 + } + return nil, ocbError("Tag authentication failed") +} + +// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) +// function. It returns the resulting plain/ciphertext with the tag appended. +func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { + // + // Consider X as a sequence of 128-bit blocks + // + // Note: For encryption (resp. decryption), X is the plaintext (resp., the + // ciphertext without the tag). + blockSize := o.block.BlockSize() + + // + // Nonce-dependent and per-encryption variables + // + // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop + // is already computed. + truncatedNonce := make([]byte, len(nonce)) + copy(truncatedNonce, nonce) + truncatedNonce[len(truncatedNonce)-1] &= 192 + Ktop := make([]byte, blockSize) + if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { + Ktop = o.reusableKtop.Ktop + } else { + // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N + paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) + paddedNonce = append(paddedNonce, truncatedNonce...) + paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) + // Last 6 bits of paddedNonce are already zero. 
Encrypt into Ktop + paddedNonce[blockSize-1] &= 192 + Ktop = paddedNonce + o.block.Encrypt(Ktop, Ktop) + o.reusableKtop.noncePrefix = truncatedNonce + o.reusableKtop.Ktop = Ktop + } + + // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) + xorHalves := make([]byte, blockSize/2) + byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) + stretch := append(Ktop, xorHalves...) + bottom := int(nonce[len(nonce)-1] & 63) + offset := make([]byte, len(stretch)) + byteutil.ShiftNBytesLeft(offset, stretch, bottom) + offset = offset[:blockSize] + + // + // Process any whole blocks + // + // Note: For encryption Y is ciphertext || tag, for decryption Y is + // plaintext || tag. + checksum := make([]byte, blockSize) + m := len(X) / blockSize + for i := 0; i < m; i++ { + index := bits.TrailingZeros(uint(i + 1)) + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) + blockX := X[i*blockSize : (i+1)*blockSize] + blockY := Y[i*blockSize : (i+1)*blockSize] + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: + o.block.Decrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockY) + } + } + // + // Process any final partial block and compute raw tag + // + tag := make([]byte, blockSize) + if len(X)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + pad := make([]byte, blockSize) + o.block.Encrypt(pad, offset) + chunkX := X[blockSize*m:] + chunkY := Y[blockSize*m : len(X)] + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +} + +// This hash function is used to compute the tag. Per design, on empty input it +// returns a slice of zeros, of the same length as the underlying block cipher +// block size. 
+func (o *ocb) hash(adata []byte) []byte { + // + // Consider A as a sequence of 128-bit blocks + // + A := make([]byte, len(adata)) + copy(A, adata) + blockSize := o.block.BlockSize() + + // + // Process any whole blocks + // + sum := make([]byte, blockSize) + offset := make([]byte, blockSize) + m := len(A) / blockSize + for i := 0; i < m; i++ { + chunk := A[blockSize*i : blockSize*(i+1)] + index := bits.TrailingZeros(uint(i + 1)) + // If the mask table is too short + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[index]) + byteutil.XorBytesMut(chunk, offset) + o.block.Encrypt(chunk, chunk) + byteutil.XorBytesMut(sum, chunk) + } + + // + // Process any final partial block; compute final hash value + // + if len(A)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + // Pad block with 1 || 0 ^ 127 - bitlength(a) + ending := make([]byte, blockSize-len(A)%blockSize) + ending[0] = 0x80 + encrypted := append(A[blockSize*m:], ending...) + byteutil.XorBytesMut(encrypted, offset) + o.block.Encrypt(encrypted, encrypted) + byteutil.XorBytesMut(sum, encrypted) + } + return sum +} + +func initializeMaskTable(block cipher.Block) mask { + // + // Key-dependent variables + // + lAst := make([]byte, block.BlockSize()) + block.Encrypt(lAst, lAst) + lDol := byteutil.GfnDouble(lAst) + L := make([][]byte, 1) + L[0] = byteutil.GfnDouble(lDol) + + return mask{ + lAst: lAst, + lDol: lDol, + L: L, + } +} + +// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) +func (m *mask) extendTable(limit int) { + for i := len(m.L); i <= limit; i++ { + m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) + } +} + +func ocbError(err string) error { + return errors.New("crypto/ocb: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go new file mode 100644 index 0000000000..0efaf344fd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go @@ -0,0 +1,136 @@ +// In the test vectors provided by RFC 7253, the "bottom" +// internal variable, which defines "offset" for the first time, does not +// exceed 15. However, it can attain values up to 63. + +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
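
// Editorial aside -- a minimal sketch, not part of the vendored diff. The mask
// table above is extended with byteutil.GfnDouble, which is assumed to double
// a big-endian 16-byte block in GF(2^128) modulo the OCB polynomial
// x^128 + x^7 + x^2 + x + 1, along these lines:
func gfnDoubleSketch(in []byte) []byte {
	out := make([]byte, len(in))
	msb := in[0] >> 7 // the bit shifted off the left edge
	for i := 0; i < len(in)-1; i++ {
		out[i] = in[i]<<1 | in[i+1]>>7
	}
	out[len(in)-1] = in[len(in)-1] << 1
	if msb == 1 {
		out[len(in)-1] ^= 0x87 // fold x^128 back in as x^7 + x^2 + x + 1
	}
	return out
}
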
+ +package ocb + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + + {"9438C5D599308EAF13F800D2D31EA7F0", + "C38EE4801BEBFFA1CD8635BE", + "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", + "962D227786FB8913A8BAD5DC3250", + "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, + {"BA7DE631C7D6712167C6724F5B9A2B1D", + "35263EBDA05765DC0E71F1F5", + "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", + "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", + "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, + {"2E74B25289F6FD3E578C24866E9C72A5", + "FD912F15025AF8414642BA1D1D", + "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", + "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", + "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, + {"E7EC507C802528F790AFF5303A017B17", + "4B97A7A568940A9E3CE7A99E93031E", + "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", + "9455B3EA706B74", + "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, + {"6C928AA3224736F28EE7378DE0090191", + "8936138E2E4C6A13280017A1622D", + "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", + "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", + "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, + {"ECEA315CA4B3F425B0C9957A17805EA4", + "664CDAE18403F4F9BA13015A44FC", + "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", + "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", + "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, + {"4EE616C4A58AAA380878F71A373461F6", + "91B8C9C176D9C385E9C47E52", + "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", + "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", + "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, + {"DCD475773136C830D5E3D0C5FE05B7FF", 
+ "BB8E1FBB483BE7616A922C4A", + "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", + "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", + "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, + {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", + "FACF804A4BEBF998505FF9DE", + "8213B9263B2971A5BDA18DBD02208EE1", + "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", + "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, + {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", + "0B0EF53D4606B28D1398355F", + "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", + "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", + "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, + {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", + "5C83F4896D0738E1366B1836", + "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", + "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", + "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, + {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", + "36B88F63ADBB5668588181D774", + "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", + "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", + "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, + {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", + "BC099AEB637361BAC536B57618", + "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", + "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", + "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, + {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", + "B1B0AAF373B8B026EB80422051D8", + "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", + 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", + "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, + {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", + "2F2F17CECF7E5A756D10785A3CB9DB", + "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", + "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", + "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, + {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", + "BCA625674F41D1E3AB47672DC0C3", + "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", + "E525422519ECE070E82C", + "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, + {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", + "36D2EC25ADD33CDEDF495205BBC923", + "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", + "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", + "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, + {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", + "81691D5D20EC818FCFF24B33DECC", + "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", + "EB861165DAF7625F827C6B574ED703F03215", + "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, + {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", + "D2A7B94DD12CDACA909D3AD7", + "E021A78F374FC271389AB9A3E97077D755", + "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", + "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, + {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", + "65ED3D63F79F90BBFD19775E", + "336A8C0B7243582A46B221AA677647FCAE91", + "134A8B34824A290E7B", + "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, + {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", + "C391A856B9FE234E14BA1AC7BB40FF", + "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", + "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", + 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, + {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", + "B15B61C8BB39261A8F55AB178EC3", + "D0729B6B75BB", + "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", + "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, + {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", + "01D7BDB1133E9C347486C1EFA6", + "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", + "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", + "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, + {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", + "0C52B3D11D636E5910A4DD76D32C", + "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", + "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", + "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go new file mode 100644 index 0000000000..843085589c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -0,0 +1,78 @@ +package ocb + +import ( + "encoding/hex" +) + +// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is +// shared accross tests. 
+var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") + +var rfc7253testVectors = []struct { + nonce, header, plaintext, ciphertext string +}{ + {"BBAA99887766554433221100", + "", + "", + "785407BFFFC8AD9EDCC5520AC9111EE6"}, + {"BBAA99887766554433221101", + "0001020304050607", + "0001020304050607", + "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, + {"BBAA99887766554433221102", + "0001020304050607", + "", + "81017F8203F081277152FADE694A0A00"}, + {"BBAA99887766554433221103", + "", + "0001020304050607", + "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, + {"BBAA99887766554433221104", + "000102030405060708090A0B0C0D0E0F", + "000102030405060708090A0B0C0D0E0F", + "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, + {"BBAA99887766554433221105", + "000102030405060708090A0B0C0D0E0F", + "", + "8CF761B6902EF764462AD86498CA6B97"}, + {"BBAA99887766554433221106", + "", + "000102030405060708090A0B0C0D0E0F", + "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, + {"BBAA99887766554433221107", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, + {"BBAA99887766554433221108", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "", + "6DC225A071FC1B9F7C69F93B0F1E10DE"}, + {"BBAA99887766554433221109", + "", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, + {"BBAA9988776655443322110A", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, + {"BBAA9988776655443322110B", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "", + "FE80690BEE8A485D11F32965BC9D2A32"}, + {"BBAA9988776655443322110C", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, + {"BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, + {"BBAA9988776655443322110E", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "", + "C5CD9D1850C141E358649994EE701B68"}, + {"BBAA9988776655443322110F", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go new file mode 100644 index 0000000000..5dc158f012 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -0,0 +1,24 @@ +package ocb + +// Second set of test vectors from https://tools.ietf.org/html/rfc7253 +var rfc7253TestVectorTaglen96 = struct { + key, nonce, header, plaintext, ciphertext string +}{"0F0E0D0C0B0A09080706050403020100", + "BBAA9988776655443322110D", + 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + +var rfc7253AlgorithmTest = []struct { + KEYLEN, TAGLEN int + OUTPUT string }{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, + } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go new file mode 100644 index 0000000000..3c6251d1ce --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go @@ -0,0 +1,153 @@ +// Copyright 2014 Matthew Endsley +// All rights reserved +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted providing that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// Package keywrap is an implementation of the RFC 3394 AES key wrapping +// algorithm. This is used in OpenPGP with elliptic curve keys. +package keywrap + +import ( + "crypto/aes" + "encoding/binary" + "errors" +) + +var ( + // ErrWrapPlaintext is returned if the plaintext is not a multiple + // of 64 bits. + ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") + + // ErrUnwrapCiphertext is returned if the ciphertext is not a + // multiple of 64 bits. + ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") + + // ErrUnwrapFailed is returned if unwrapping a key fails. + ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") + + // NB: the AES NewCipher call only fails if the key is an invalid length. + + // ErrInvalidKey is returned when the AES key is invalid. + ErrInvalidKey = errors.New("keywrap: invalid AES key") +) + +// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
+func Wrap(key, plainText []byte) ([]byte, error) {
+	if len(plainText)%8 != 0 {
+		return nil, ErrWrapPlaintext
+	}
+
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, ErrInvalidKey
+	}
+
+	nblocks := len(plainText) / 8
+
+	// 1) Initialize variables.
+	var block [aes.BlockSize]byte
+	// - Set A = IV, an initial value (see 2.2.3)
+	for ii := 0; ii < 8; ii++ {
+		block[ii] = 0xA6
+	}
+
+	// - For i = 1 to n
+	// - Set R[i] = P[i]
+	intermediate := make([]byte, len(plainText))
+	copy(intermediate, plainText)
+
+	// 2) Calculate intermediate values.
+	for ii := 0; ii < 6; ii++ {
+		for jj := 0; jj < nblocks; jj++ {
+			// - B = AES(K, A | R[i])
+			copy(block[8:], intermediate[jj*8:jj*8+8])
+			c.Encrypt(block[:], block[:])
+
+			// - A = MSB(64, B) ^ t where t = (n*j)+i
+			t := uint64(ii*nblocks + jj + 1)
+			val := binary.BigEndian.Uint64(block[:8]) ^ t
+			binary.BigEndian.PutUint64(block[:8], val)
+
+			// - R[i] = LSB(64, B)
+			copy(intermediate[jj*8:jj*8+8], block[8:])
+		}
+	}
+
+	// 3) Output results.
+	// - Set C[0] = A
+	// - For i = 1 to n
+	// - C[i] = R[i]
+	return append(block[:8], intermediate...), nil
+}
+
+// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm.
+func Unwrap(key, cipherText []byte) ([]byte, error) {
+	if len(cipherText)%8 != 0 {
+		return nil, ErrUnwrapCiphertext
+	}
+
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, ErrInvalidKey
+	}
+
+	nblocks := len(cipherText)/8 - 1
+
+	// 1) Initialize variables.
+	var block [aes.BlockSize]byte
+	// - Set A = C[0]
+	copy(block[:8], cipherText[:8])
+
+	// - For i = 1 to n
+	// - Set R[i] = C[i]
+	intermediate := make([]byte, len(cipherText)-8)
+	copy(intermediate, cipherText[8:])
+
+	// 2) Compute intermediate values.
+	for jj := 5; jj >= 0; jj-- {
+		for ii := nblocks - 1; ii >= 0; ii-- {
+			// - B = AES-1(K, (A ^ t) | R[i]) where t = (n*j)+i
+			// - A = MSB(64, B)
+			t := uint64(jj*nblocks + ii + 1)
+			val := binary.BigEndian.Uint64(block[:8]) ^ t
+			binary.BigEndian.PutUint64(block[:8], val)
+
+			copy(block[8:], intermediate[ii*8:ii*8+8])
+			c.Decrypt(block[:], block[:])
+
+			// - R[i] = LSB(B, 64)
+			copy(intermediate[ii*8:ii*8+8], block[8:])
+		}
+	}
+
+	// 3) Output results.
+	// - If A is an appropriate initial value (see 2.2.3),
+	for ii := 0; ii < 8; ii++ {
+		if block[ii] != 0xA6 {
+			return nil, ErrUnwrapFailed
+		}
+	}
+
+	// - For i = 1 to n
+	// - P[i] = R[i]
+	return intermediate, nil
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
new file mode 100644
index 0000000000..3b357e5851
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
@@ -0,0 +1,224 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
+// very similar to PEM except that it has an additional CRC checksum.
+package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor"
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"io"
+)
+
+// A Block represents an OpenPGP armored structure.
+//
+// The encoded form is:
+// -----BEGIN Type-----
+// Headers
+//
+// base64-encoded Bytes
+// '=' base64 encoded checksum
+// -----END Type-----
+// where Headers is a possibly empty sequence of Key: Value lines. 
+// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
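
// Editorial aside -- a sketch of a typical call site for Decode below, not
// part of the vendored diff. The CRC24 is only verified once Body reaches
// EOF, so Body must be drained before the data is trusted. Assumes "io".
func readArmoredSketch(in io.Reader) (blockType string, body []byte, err error) {
	p, err := Decode(in)
	if err != nil {
		return "", nil, err
	}
	// ArmorCorrupt surfaces here if a checksum line was present and mismatched.
	body, err = io.ReadAll(p.Body)
	if err != nil {
		return "", nil, err
	}
	return p.Type, body, nil
}
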
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go new file mode 100644 index 0000000000..6f07582c37 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. 
+// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go new file mode 100644 index 0000000000..a94f6150c4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "hash" + "io" +) + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { + start := 0 + for i, c := range buf { + switch *s { + case 0: + if c == '\r' { + *s = 1 + } else if c == '\n' { + cw.Write(buf[start:i]) + cw.Write(newline) + start = i + 1 + } + case 1: + *s = 0 + } + } + + cw.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + return writeCanonical(cth.h, buf, &cth.s) +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go new file mode 100644 index 0000000000..ec66699c2f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/clearsign/clearsign.go @@ -0,0 +1,462 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clearsign generates and processes OpenPGP, clear-signed data. See +// RFC 4880, section 7. +// +// Clearsigned messages are cryptographically signed, but the contents of the +// message are kept in plaintext so that it can be read without special tools. +package clearsign // import "github.com/ProtonMail/go-crypto/openpgp/clearsign" + +import ( + "bufio" + "bytes" + "crypto" + "fmt" + "hash" + "io" + "net/textproto" + "strconv" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// A Block represents a clearsigned message. A signature on a Block can +// be checked by calling Block.VerifySignature. +type Block struct { + Headers textproto.MIMEHeader // Optional unverified Hash headers + Plaintext []byte // The original message text + Bytes []byte // The signed message + ArmoredSignature *armor.Block // The signature block +} + +// start is the marker which denotes the beginning of a clearsigned message. +var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----") + +// dashEscape is prefixed to any lines that begin with a hyphen so that they +// can't be confused with endText. +var dashEscape = []byte("- ") + +// endText is a marker which denotes the end of the message and the start of +// an armored signature. +var endText = []byte("-----BEGIN PGP SIGNATURE-----") + +// end is a marker which denotes the end of the armored signature. +var end = []byte("\n-----END PGP SIGNATURE-----") + +var crlf = []byte("\r\n") +var lf = byte('\n') + +// getLine returns the first \r\n or \n delineated line from the given byte +// array. The line does not include the \r\n or \n. The remainder of the byte +// array (also not including the new line bytes) is also returned and this will +// always be smaller than the original argument. +func getLine(data []byte) (line, rest []byte) { + i := bytes.Index(data, []byte{'\n'}) + var j int + if i < 0 { + i = len(data) + j = i + } else { + j = i + 1 + if i > 0 && data[i-1] == '\r' { + i-- + } + } + return data[0:i], data[j:] +} + +// Decode finds the first clearsigned message in data and returns it, as well as +// the suffix of data which remains after the message. Any prefix data is +// discarded. +// +// If no message is found, or if the message is invalid, Decode returns nil and +// the whole data slice. The only allowed header type is Hash, and it is not +// verified against the signature hash. +func Decode(data []byte) (b *Block, rest []byte) { + // start begins with a newline. However, at the very beginning of + // the byte array, we'll accept the start string without it. + rest = data + if bytes.HasPrefix(data, start[1:]) { + rest = rest[len(start)-1:] + } else if i := bytes.Index(data, start); i >= 0 { + rest = rest[i+len(start):] + } else { + return nil, data + } + + // Consume the start line and check it does not have a suffix. + suffix, rest := getLine(rest) + if len(suffix) != 0 { + return nil, data + } + + var line []byte + b = &Block{ + Headers: make(textproto.MIMEHeader), + } + + // Next come a series of header lines. + for { + // This loop terminates because getLine's second result is + // always smaller than its argument. + if len(rest) == 0 { + return nil, data + } + // An empty line marks the end of the headers. 
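		// (Editorial aside, not part of the vendored diff.) For reference, the
		// framing parsed here looks like:
		//
		//	-----BEGIN PGP SIGNED MESSAGE-----
		//	Hash: SHA256                <- only "Hash" headers are accepted
		//	                            <- empty line ends the headers
		//	The dash-escaped message text...
		//	-----BEGIN PGP SIGNATURE-----
		//	...armored signature...
		//	-----END PGP SIGNATURE-----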
+ if line, rest = getLine(rest); len(line) == 0 { + break + } + + // Reject headers with control or Unicode characters. + if i := bytes.IndexFunc(line, func(r rune) bool { + return r < 0x20 || r > 0x7e + }); i != -1 { + return nil, data + } + + i := bytes.Index(line, []byte{':'}) + if i == -1 { + return nil, data + } + + key, val := string(line[0:i]), string(line[i+1:]) + key = strings.TrimSpace(key) + if key != "Hash" { + return nil, data + } + for _, val := range strings.Split(val, ",") { + val = strings.TrimSpace(val) + b.Headers.Add(key, val) + } + } + + firstLine := true + for { + start := rest + + line, rest = getLine(rest) + if len(line) == 0 && len(rest) == 0 { + // No armored data was found, so this isn't a complete message. + return nil, data + } + if bytes.Equal(line, endText) { + // Back up to the start of the line because armor expects to see the + // header line. + rest = start + break + } + + // The final CRLF isn't included in the hash so we don't write it until + // we've seen the next line. + if firstLine { + firstLine = false + } else { + b.Bytes = append(b.Bytes, crlf...) + } + + if bytes.HasPrefix(line, dashEscape) { + line = line[2:] + } + line = bytes.TrimRight(line, " \t") + b.Bytes = append(b.Bytes, line...) + + b.Plaintext = append(b.Plaintext, line...) + b.Plaintext = append(b.Plaintext, lf) + } + + // We want to find the extent of the armored data (including any newlines at + // the end). + i := bytes.Index(rest, end) + if i == -1 { + return nil, data + } + i += len(end) + for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') { + i++ + } + armored := rest[:i] + rest = rest[i:] + + var err error + b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored)) + if err != nil { + return nil, data + } + + return b, rest +} + +// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed +// message. The clear-signed message is written to buffered and a hash, suitable +// for signing, is maintained in h. +// +// When closed, an armored signature is created and written to complete the +// message. +type dashEscaper struct { + buffered *bufio.Writer + hashers []hash.Hash // one per key in privateKeys + hashType crypto.Hash + toHash io.Writer // writes to all the hashes in hashers + + atBeginningOfLine bool + isFirstLine bool + + whitespace []byte + byteBuf []byte // a one byte buffer to save allocations + + privateKeys []*packet.PrivateKey + config *packet.Config +} + +func (d *dashEscaper) Write(data []byte) (n int, err error) { + for _, b := range data { + d.byteBuf[0] = b + + if d.atBeginningOfLine { + // The final CRLF isn't included in the hash so we have to wait + // until this point (the start of the next line) before writing it. + if !d.isFirstLine { + d.toHash.Write(crlf) + } + d.isFirstLine = false + } + + // Any whitespace at the end of the line has to be removed so we + // buffer it until we find out whether there's more on this line. + if b == ' ' || b == '\t' || b == '\r' { + d.whitespace = append(d.whitespace, b) + d.atBeginningOfLine = false + continue + } + + if d.atBeginningOfLine { + // At the beginning of a line, hyphens have to be escaped. + if b == '-' { + // The signature isn't calculated over the dash-escaped text so + // the escape is only written to buffered. + if _, err = d.buffered.Write(dashEscape); err != nil { + return + } + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } else if b == '\n' { + // Nothing to do because we delay writing CRLF to the hash. 
+ } else { + d.toHash.Write(d.byteBuf) + d.atBeginningOfLine = false + } + if err = d.buffered.WriteByte(b); err != nil { + return + } + } else { + if b == '\n' { + // We got a raw \n. Drop any trailing whitespace and write a + // CRLF. + d.whitespace = d.whitespace[:0] + // We delay writing CRLF to the hash until the start of the + // next line. + if err = d.buffered.WriteByte(b); err != nil { + return + } + d.atBeginningOfLine = true + } else { + // Any buffered whitespace wasn't at the end of the line so + // we need to write it out. + if len(d.whitespace) > 0 { + d.toHash.Write(d.whitespace) + if _, err = d.buffered.Write(d.whitespace); err != nil { + return + } + d.whitespace = d.whitespace[:0] + } + d.toHash.Write(d.byteBuf) + if err = d.buffered.WriteByte(b); err != nil { + return + } + } + } + } + + n = len(data) + return +} + +func (d *dashEscaper) Close() (err error) { + if !d.atBeginningOfLine { + if err = d.buffered.WriteByte(lf); err != nil { + return + } + } + + out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil) + if err != nil { + return + } + + t := d.config.Now() + for i, k := range d.privateKeys { + sig := new(packet.Signature) + sig.SigType = packet.SigTypeText + sig.PubKeyAlgo = k.PubKeyAlgo + sig.Hash = d.hashType + sig.CreationTime = t + sig.IssuerKeyId = &k.KeyId + + if err = sig.Sign(d.hashers[i], k, d.config); err != nil { + return + } + if err = sig.Serialize(out); err != nil { + return + } + } + + if err = out.Close(); err != nil { + return + } + if err = d.buffered.Flush(); err != nil { + return + } + return +} + +// Encode returns a WriteCloser which will clear-sign a message with privateKey +// and write it to w. If config is nil, sensible defaults are used. +func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config) +} + +// EncodeMulti returns a WriteCloser which will clear-sign a message with all the +// private keys indicated and write it to w. If config is nil, sensible defaults +// are used. +func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) { + for _, k := range privateKeys { + if k.Encrypted { + return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString())) + } + } + + hashType := config.Hash() + name := nameOfHash(hashType) + if len(name) == 0 { + return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType))) + } + + if !hashType.Available() { + return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType))) + } + var hashers []hash.Hash + var ws []io.Writer + for range privateKeys { + h := hashType.New() + hashers = append(hashers, h) + ws = append(ws, h) + } + toHash := io.MultiWriter(ws...) + + buffered := bufio.NewWriter(w) + // start has a \n at the beginning that we don't want here. 
+ if _, err = buffered.Write(start[1:]); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if _, err = buffered.WriteString("Hash: "); err != nil { + return + } + if _, err = buffered.WriteString(name); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + if err = buffered.WriteByte(lf); err != nil { + return + } + + plaintext = &dashEscaper{ + buffered: buffered, + hashers: hashers, + hashType: hashType, + toHash: toHash, + + atBeginningOfLine: true, + isFirstLine: true, + + byteBuf: make([]byte, 1), + + privateKeys: privateKeys, + config: config, + } + + return +} + +// VerifySignature checks a clearsigned message signature, and checks that the +// hash algorithm in the header matches the hash algorithm in the signature. +func (b *Block) VerifySignature(keyring openpgp.KeyRing, config *packet.Config) (signer *openpgp.Entity, err error) { + var expectedHashes []crypto.Hash + for _, v := range b.Headers { + for _, name := range v { + expectedHash := nameToHash(name) + if uint8(expectedHash) == 0 { + return nil, errors.StructuralError("unknown hash algorithm in cleartext message headers") + } + expectedHashes = append(expectedHashes, expectedHash) + } + } + if len(expectedHashes) == 0 { + expectedHashes = append(expectedHashes, crypto.MD5) + } + return openpgp.CheckDetachedSignatureAndHash(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body, expectedHashes, config) +} + +// nameOfHash returns the OpenPGP name for the given hash, or the empty string +// if the name isn't known. See RFC 4880, section 9.4. +func nameOfHash(h crypto.Hash) string { + switch h { + case crypto.MD5: + return "MD5" + case crypto.SHA1: + return "SHA1" + case crypto.RIPEMD160: + return "RIPEMD160" + case crypto.SHA224: + return "SHA224" + case crypto.SHA256: + return "SHA256" + case crypto.SHA384: + return "SHA384" + case crypto.SHA512: + return "SHA512" + } + return "" +} + +// nameToHash returns a hash for a given OpenPGP name, or 0 +// if the name isn't known. See RFC 4880, section 9.4. +func nameToHash(h string) crypto.Hash { + switch h { + case "MD5": + return crypto.MD5 + case "SHA1": + return crypto.SHA1 + case "RIPEMD160": + return crypto.RIPEMD160 + case "SHA224": + return crypto.SHA224 + case "SHA256": + return crypto.SHA256 + case "SHA384": + return crypto.SHA384 + case "SHA512": + return crypto.SHA512 + } + return crypto.Hash(0) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 0000000000..0b49be4bf3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,165 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. 
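
// Editorial aside -- a standalone sketch, not part of the vendored diff: the
// Encrypt function below pads the session key to 40 octets before key
// wrapping, each pad byte holding the number of bytes added (so a 16-byte key
// gains 24 bytes of value 24). msg must be at most 40 bytes, as Encrypt
// enforces.
func padTo40Sketch(msg []byte) []byte {
	padding := make([]byte, 40-len(msg))
	for i := range padding {
		padding[i] = byte(40 - len(msg))
	}
	return append(msg, padding...) // Decrypt strips this by reading the last byte
}
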
+package ecdh
+
+import (
+	"bytes"
+	"crypto/elliptic"
+	"errors"
+	"io"
+	"math/big"
+
+	"github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
+)
+
+type KDF struct {
+	Hash   algorithm.Hash
+	Cipher algorithm.Cipher
+}
+
+type PublicKey struct {
+	ecc.CurveType
+	elliptic.Curve
+	X, Y *big.Int
+	KDF
+}
+
+type PrivateKey struct {
+	PublicKey
+	D []byte
+}
+
+func GenerateKey(c elliptic.Curve, kdf KDF, rand io.Reader) (priv *PrivateKey, err error) {
+	priv = new(PrivateKey)
+	priv.PublicKey.Curve = c
+	priv.PublicKey.KDF = kdf
+	priv.D, priv.PublicKey.X, priv.PublicKey.Y, err = elliptic.GenerateKey(c, rand)
+	return
+}
+
+func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) {
+	if len(msg) > 40 {
+		return nil, nil, errors.New("ecdh: message too long")
+	}
+	// the sender MAY use 21, 13, and 5 bytes of padding for AES-128,
+	// AES-192, and AES-256, respectively, to provide the same number of
+	// octets, 40 total, as an input to the key wrapping method.
+	padding := make([]byte, 40-len(msg))
+	for i := range padding {
+		padding[i] = byte(40 - len(msg))
+	}
+	m := append(msg, padding...)
+
+	if pub.CurveType == ecc.Curve25519 {
+		return X25519Encrypt(random, pub, m, curveOID, fingerprint)
+	}
+
+	d, x, y, err := elliptic.GenerateKey(pub.Curve, random)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	vsG = elliptic.Marshal(pub.Curve, x, y)
+	zbBig, _ := pub.Curve.ScalarMult(pub.X, pub.Y, d)
+
+	byteLen := (pub.Curve.Params().BitSize + 7) >> 3
+	zb := make([]byte, byteLen)
+	zbBytes := zbBig.Bytes()
+	copy(zb[byteLen-len(zbBytes):], zbBytes)
+
+	z, err := buildKey(pub, zb, curveOID, fingerprint, false, false)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if c, err = keywrap.Wrap(z, m); err != nil {
+		return nil, nil, err
+	}
+
+	return vsG, c, nil
+
+}
+
+func Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) {
+	if priv.PublicKey.CurveType == ecc.Curve25519 {
+		return X25519Decrypt(priv, vsG, m, curveOID, fingerprint)
+	}
+	x, y := elliptic.Unmarshal(priv.Curve, vsG)
+	zbBig, _ := priv.Curve.ScalarMult(x, y, priv.D)
+
+	byteLen := (priv.Curve.Params().BitSize + 7) >> 3
+	zb := make([]byte, byteLen)
+	zbBytes := zbBig.Bytes()
+	copy(zb[byteLen-len(zbBytes):], zbBytes)
+
+	z, err := buildKey(&priv.PublicKey, zb, curveOID, fingerprint, false, false)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := keywrap.Unwrap(z, m)
+	if err != nil {
+		return nil, err
+	}
+
+	return c[:len(c)-int(c[len(c)-1])], nil
+}
+
+func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) {
+	// Param = curve_OID_len || curve_OID || public_key_alg_ID || 03
+	// || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap
+	// || "Anonymous Sender    " || recipient_fingerprint;
+	param := new(bytes.Buffer)
+	if _, err := param.Write(curveOID); err != nil {
+		return nil, err
+	}
+	algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()}
+	if _, err := param.Write(algKDF); err != nil {
+		return nil, err
+	}
+	if _, err := param.Write([]byte("Anonymous Sender    ")); err != nil {
+		return nil, err
+	}
+	// For v5 keys, the 20 leftmost octets of the fingerprint are used. 
+	if _, err := param.Write(fingerprint[:20]); err != nil {
+		return nil, err
+	}
+	if param.Len()-len(curveOID) != 45 {
+		return nil, errors.New("ecdh: malformed KDF Param")
+	}
+
+	// MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param );
+	h := pub.KDF.Hash.New()
+	if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil {
+		return nil, err
+	}
+	zbLen := len(zb)
+	i := 0
+	j := zbLen - 1
+	if stripLeading {
+		// Work around old go crypto bug where the leading zeros are missing.
+		for ; i < zbLen && zb[i] == 0; i++ {
+		}
+	}
+	if stripTrailing {
+		// Work around old OpenPGP.js bug where insignificant trailing zeros in
+		// this little-endian number are missing.
+		// (See https://github.com/openpgpjs/openpgpjs/pull/853.)
+		for ; j >= 0 && zb[j] == 0; j-- {
+		}
+	}
+	if _, err := h.Write(zb[i : j+1]); err != nil {
+		return nil, err
+	}
+	if _, err := h.Write(param.Bytes()); err != nil {
+		return nil, err
+	}
+	mb := h.Sum(nil)
+
+	return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB.
+
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go
new file mode 100644
index 0000000000..15b41a31df
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go
@@ -0,0 +1,157 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ecdh implements ECDH encryption, suitable for OpenPGP,
+// as specified in RFC 6637, section 8.
+package ecdh
+
+import (
+	"errors"
+	"io"
+	"math/big"
+
+	"github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
+	"golang.org/x/crypto/curve25519"
+)
+
+// Generates a private-public key-pair.
+// 'priv' is a private key; a scalar belonging to the set
+// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the
+// curve. 'pub' is simply 'priv' * G where G is the base point.
+// See https://cr.yp.to/ecdh.html and RFC7748, sec 5.
+func x25519GenerateKeyPairBytes(rand io.Reader) (priv [32]byte, pub [32]byte, err error) {
+	var n, helper = new(big.Int), new(big.Int)
+	n.SetUint64(1)
+	n.Lsh(n, 252)
+	helper.SetString("27742317777372353535851937790883648493", 10)
+	n.Add(n, helper)
+
+	for {
+		_, err = io.ReadFull(rand, priv[:])
+		if err != nil {
+			return
+		}
+		// The following ensures that the private key is a number of the form
+		// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup
+		// of the curve.
+		priv[0] &= 248
+		priv[31] &= 127
+		priv[31] |= 64
+
+		// If the scalar is out of range, sample another random number.
+		if new(big.Int).SetBytes(priv[:]).Cmp(n) >= 0 {
+			continue
+		}
+
+		curve25519.ScalarBaseMult(&pub, &priv)
+		return
+	}
+}
+
+// X25519GenerateKey samples the key pair according to the correct distribution.
+// It also sets the given key-derivation function and returns the *PrivateKey
+// object along with an error. 
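
// Editorial aside -- a roundtrip sketch through the X25519 path, not part of
// the vendored diff. curveOID and fingerprint are opaque KDF inputs at this
// layer, so dummy values stand in; real callers pass the key's encoded curve
// OID, its fingerprint, and a KDF configured by the OpenPGP packet layer.
// Assumes "bytes" and "crypto/rand".
func exampleX25519Roundtrip(kdf KDF) (bool, error) {
	priv, err := X25519GenerateKey(rand.Reader, kdf)
	if err != nil {
		return false, err
	}
	oid := []byte{0x0A}            // dummy curve OID bytes
	fp := make([]byte, 20)         // dummy fingerprint; buildKey uses 20 octets
	sessionKey := make([]byte, 16) // demo all-zero session key
	vsG, wrapped, err := Encrypt(rand.Reader, &priv.PublicKey, sessionKey, oid, fp)
	if err != nil {
		return false, err
	}
	got, err := Decrypt(priv, vsG, wrapped, oid, fp)
	if err != nil {
		return false, err
	}
	return bytes.Equal(got, sessionKey), nil
}
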
+func X25519GenerateKey(rand io.Reader, kdf KDF) (priv *PrivateKey, err error) { + ci := ecc.FindByName("Curve25519") + priv = new(PrivateKey) + priv.PublicKey.Curve = ci.Curve + d, pubKey, err := x25519GenerateKeyPairBytes(rand) + if err != nil { + return nil, err + } + priv.PublicKey.KDF = kdf + priv.D = make([]byte, 32) + copyReversed(priv.D, d[:]) + priv.PublicKey.CurveType = ci.CurveType + priv.PublicKey.Curve = ci.Curve + /* + * Note that ECPoint.point differs from the definition of public keys in + * [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is + * more uniform with how big integers are represented in TLS, and (2) there + * is an additional length byte (so ECpoint.point is actually 33 bytes), + * again for uniformity (and extensibility). + */ + var encodedKey = make([]byte, 33) + encodedKey[0] = 0x40 + copy(encodedKey[1:], pubKey[:]) + priv.PublicKey.X = new(big.Int).SetBytes(encodedKey[:]) + priv.PublicKey.Y = new(big.Int) + return priv, nil +} + +func X25519Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + d, ephemeralKey, err := x25519GenerateKeyPairBytes(random) + if err != nil { + return nil, nil, err + } + var pubKey [32]byte + + if pub.X.BitLen() > 33*264 { + return nil, nil, errors.New("ecdh: invalid key") + } + copy(pubKey[:], pub.X.Bytes()[1:]) + + var zb [32]byte + curve25519.ScalarBaseMult(&zb, &d) + curve25519.ScalarMult(&zb, &d, &pubKey) + z, err := buildKey(pub, zb[:], curveOID, fingerprint, false, false) + + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, msg); err != nil { + return nil, nil, err + } + + var vsg [33]byte + vsg[0] = 0x40 + copy(vsg[1:], ephemeralKey[:]) + + return vsg[:], c, nil +} + +func X25519Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { + var zb, d, ephemeralKey [32]byte + if len(vsG) != 33 || vsG[0] != 0x40 { + return nil, errors.New("ecdh: invalid key") + } + copy(ephemeralKey[:], vsG[1:33]) + + copyReversed(d[:], priv.D) + curve25519.ScalarBaseMult(&zb, &d) + curve25519.ScalarMult(&zb, &d, &ephemeralKey) + + var c []byte + + for i := 0; i < 3; i++ { + // Try buildKey three times for compat, see comments in buildKey. + z, err := buildKey(&priv.PublicKey, zb[:], curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + res, err := keywrap.Unwrap(z, m) + if i == 2 && err != nil { + // Only return an error after we've tried all variants of buildKey. + return nil, err + } + + c = res + if err == nil { + break + } + } + + return c[:len(c)-int(c[len(c)-1])], nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 0000000000..6a07d8ff27 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. 
+// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + if s.ModInverse(s, priv.P) == nil { + return nil, errors.New("elgamal: invalid private key") + } + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. 
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
+	_, err = io.ReadFull(rand, s)
+	if err != nil {
+		return
+	}
+
+	for i := 0; i < len(s); i++ {
+		for s[i] == 0 {
+			_, err = io.ReadFull(rand, s[i:i+1])
+			if err != nil {
+				return
+			}
+		}
+	}
+
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
new file mode 100644
index 0000000000..17e2bcfed2
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
@@ -0,0 +1,116 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors contains common error types for the OpenPGP packages.
+package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors"
+
+import (
+	"strconv"
+)
+
+// A StructuralError is returned when OpenPGP data is found to be syntactically
+// invalid.
+type StructuralError string
+
+func (s StructuralError) Error() string {
+	return "openpgp: invalid data: " + string(s)
+}
+
+// UnsupportedError indicates that, although the OpenPGP data is valid, it
+// makes use of currently unimplemented features.
+type UnsupportedError string
+
+func (s UnsupportedError) Error() string {
+	return "openpgp: unsupported feature: " + string(s)
+}
+
+// InvalidArgumentError indicates that the caller is in error and passed an
+// incorrect value.
+type InvalidArgumentError string
+
+func (i InvalidArgumentError) Error() string {
+	return "openpgp: invalid argument: " + string(i)
+}
+
+// SignatureError indicates that a syntactically valid signature failed to
+// validate.
+type SignatureError string
+
+func (b SignatureError) Error() string {
+	return "openpgp: invalid signature: " + string(b)
+}
+
+var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch")
+var ErrMDCMissing error = SignatureError("MDC packet not found")
+
+type signatureExpiredError int
+
+func (se signatureExpiredError) Error() string {
+	return "openpgp: signature expired"
+}
+
+var ErrSignatureExpired error = signatureExpiredError(0)
+
+type keyExpiredError int
+
+func (ke keyExpiredError) Error() string {
+	return "openpgp: key expired"
+}
+
+var ErrKeyExpired error = keyExpiredError(0)
+
+type keyIncorrectError int
+
+func (ki keyIncorrectError) Error() string {
+	return "openpgp: incorrect key"
+}
+
+var ErrKeyIncorrect error = keyIncorrectError(0)
+
+// KeyInvalidError indicates that the public key parameters are invalid
+// as they do not match the private ones.
+type KeyInvalidError string
+
+func (e KeyInvalidError) Error() string {
+	return "openpgp: invalid key: " + string(e)
+}
+
+type unknownIssuerError int
+
+func (unknownIssuerError) Error() string {
+	return "openpgp: signature made by unknown entity"
+}
+
+var ErrUnknownIssuer error = unknownIssuerError(0)
+
+type keyRevokedError int
+
+func (keyRevokedError) Error() string {
+	return "openpgp: signature made by revoked key"
+}
+
+var ErrKeyRevoked error = keyRevokedError(0)
+
+type UnknownPacketTypeError uint8
+
+func (upte UnknownPacketTypeError) Error() string {
+	return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
+}
+
+// AEADError indicates that there is a problem when initializing or using an
+// AEAD instance, configuration struct, nonces or index values.
+type AEADError string + +func (ae AEADError) Error() string { + return "openpgp: aead error: " + string(ae) +} + +// ErrDummyPrivateKey results when operations are attempted on a private key +// that is just a dummy key. See +// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 +type ErrDummyPrivateKey string + +func (dke ErrDummyPrivateKey) Error() string { + return "openpgp: s2k GNU dummy key: " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go new file mode 100644 index 0000000000..17a1bfe9cb --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019 ProtonTech AG + +package algorithm + +import ( + "crypto/cipher" + "github.com/ProtonMail/go-crypto/eax" + "github.com/ProtonMail/go-crypto/ocb" +) + +// AEADMode defines the Authenticated Encryption with Associated Data mode of +// operation. +type AEADMode uint8 + +// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) +const ( + AEADModeEAX = AEADMode(1) + AEADModeOCB = AEADMode(2) + AEADModeGCM = AEADMode(100) +) + +// TagLength returns the length in bytes of authentication tags. +func (mode AEADMode) TagLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 16 + case AEADModeGCM: + return 16 + default: + return 0 + } +} + +// NonceLength returns the length in bytes of nonces. +func (mode AEADMode) NonceLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 15 + case AEADModeGCM: + return 12 + default: + return 0 + } +} + +// New returns a fresh instance of the given mode +func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { + var err error + switch mode { + case AEADModeEAX: + alg, err = eax.NewEAX(block) + case AEADModeOCB: + alg, err = ocb.NewOCB(block) + case AEADModeGCM: + alg, err = cipher.NewGCM(block) + } + if err != nil { + panic(err.Error()) + } + return alg +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go new file mode 100644 index 0000000000..5760cff80e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + + "golang.org/x/crypto/cast5" +) + +// Cipher is an official symmetric key cipher algorithm. See RFC 4880, +// section 9.2. +type Cipher interface { + // Id returns the algorithm ID, as a byte, of the cipher. + Id() uint8 + // KeySize returns the key size, in bytes, of the cipher. + KeySize() int + // BlockSize returns the block size, in bytes, of the cipher. + BlockSize() int + // New returns a fresh instance of the given cipher. + New(key []byte) cipher.Block +} + +// The following constants mirror the OpenPGP standard (RFC 4880). +const ( + TripleDES = CipherFunction(2) + CAST5 = CipherFunction(3) + AES128 = CipherFunction(7) + AES192 = CipherFunction(8) + AES256 = CipherFunction(9) +) + +// CipherById represents the different block ciphers specified for OpenPGP. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +var CipherById = map[uint8]Cipher{ + TripleDES.Id(): TripleDES, + CAST5.Id(): CAST5, + AES128.Id(): AES128, + AES192.Id(): AES192, + AES256.Id(): AES256, +} + +type CipherFunction uint8 + +// ID returns the algorithm Id, as a byte, of cipher. +func (sk CipherFunction) Id() uint8 { + return uint8(sk) +} + +var keySizeByID = map[uint8]int{ + TripleDES.Id(): 24, + CAST5.Id(): cast5.KeySize, + AES128.Id(): 16, + AES192.Id(): 24, + AES256.Id(): 32, +} + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case TripleDES: + return 24 + case CAST5: + return cast5.KeySize + case AES128: + return 16 + case AES192: + return 24 + case AES256: + return 32 + } + return 0 +} + +// BlockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) BlockSize() int { + switch cipher { + case TripleDES: + return des.BlockSize + case CAST5: + return 8 + case AES128, AES192, AES256: + return 16 + } + return 0 +} + +// New returns a fresh instance of the given cipher. +func (cipher CipherFunction) New(key []byte) (block cipher.Block) { + var err error + switch cipher { + case TripleDES: + block, err = des.NewTripleDESCipher(key) + case CAST5: + block, err = cast5.NewCipher(key) + case AES128, AES192, AES256: + block, err = aes.NewCipher(key) + } + if err != nil { + panic(err.Error()) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go new file mode 100644 index 0000000000..3f1b61b88e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -0,0 +1,86 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto" + "fmt" + "hash" +) + +// Hash is an official hash function algorithm. See RFC 4880, section 9.4. +type Hash interface { + // Id returns the algorithm ID, as a byte, of Hash. + Id() uint8 + // Available reports whether the given hash function is linked into the binary. + Available() bool + // HashFunc simply returns the value of h so that Hash implements SignerOpts. + HashFunc() crypto.Hash + // New returns a new hash.Hash calculating the given hash function. New + // panics if the hash function is not linked into the binary. + New() hash.Hash + // Size returns the length, in bytes, of a digest resulting from the given + // hash function. It doesn't require that the hash function in question be + // linked into the program. + Size() int + // String is the name of the hash function corresponding to the given + // OpenPGP hash id. + String() string +} + +// The following vars mirror the crypto/Hash supported hash functions. +var ( + MD5 Hash = cryptoHash{1, crypto.MD5} + SHA1 Hash = cryptoHash{2, crypto.SHA1} + RIPEMD160 Hash = cryptoHash{3, crypto.RIPEMD160} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} +) + +// HashById represents the different hash functions specified for OpenPGP. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 +var ( + HashById = map[uint8]Hash{ + MD5.Id(): MD5, + SHA1.Id(): SHA1, + RIPEMD160.Id(): RIPEMD160, + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + } +) + +// cryptoHash contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +type cryptoHash struct { + id uint8 + crypto.Hash +} + +// Id returns the algorithm ID, as a byte, of cryptoHash. +func (h cryptoHash) Id() uint8 { + return h.id +} + +var hashNames = map[uint8]string{ + MD5.Id(): "MD5", + SHA1.Id(): "SHA1", + RIPEMD160.Id(): "RIPEMD160", + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", +} + +func (h cryptoHash) String() string { + s, ok := hashNames[h.id] + if !ok { + panic(fmt.Sprintf("Unsupported hash function %d", h.id)) + } + return s +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go new file mode 100644 index 0000000000..f91042fd85 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go @@ -0,0 +1,118 @@ +package ecc + +import ( + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "crypto/elliptic" + "bytes" + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" +) + +type SignatureAlgorithm uint8 + +const ( + ECDSA SignatureAlgorithm = 1 + EdDSA SignatureAlgorithm = 2 +) + +type CurveInfo struct { + Name string + Oid *encoding.OID + Curve elliptic.Curve + SigAlgorithm SignatureAlgorithm + CurveType CurveType +} + +var curves = []CurveInfo{ + { + Name: "NIST curve P-256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: elliptic.P256(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "NIST curve P-384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: elliptic.P384(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "NIST curve P-521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: elliptic.P521(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: bitcurves.S256(), + CurveType: BitCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: elliptic.P256(),// filler + CurveType: Curve25519, + SigAlgorithm: ECDSA, + }, + { + Name: "Ed25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: elliptic.P256(), // filler + CurveType: NISTCurve, + SigAlgorithm: EdDSA, + }, + { + Name: "Brainpool P256r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: brainpool.P256r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "BrainpoolP384r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: brainpool.P384r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "BrainpoolP512r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: brainpool.P512r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, +} + +func FindByCurve(curve elliptic.Curve) *CurveInfo { + for _, 
curveInfo := range curves { + if curveInfo.Curve == curve { + return &curveInfo + } + } + return nil +} + +func FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindByName(name string) *CurveInfo { + for _, curveInfo := range curves { + if curveInfo.Name == name { + return &curveInfo + } + } + return nil +} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go new file mode 100644 index 0000000000..de8bca0ac2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go @@ -0,0 +1,10 @@ +package ecc + +type CurveType uint8 + +const ( + NISTCurve CurveType = 1 + Curve25519 CurveType = 2 + BitCurve CurveType = 3 + BrainpoolCurve CurveType = 4 +) \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go new file mode 100644 index 0000000000..6c921481b7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding implements openpgp packet field encodings as specified in +// RFC 4880 and 6637. +package encoding + +import "io" + +// Field is an encoded field of an openpgp packet. +type Field interface { + // Bytes returns the decoded data. + Bytes() []byte + + // BitLength is the size in bits of the decoded data. + BitLength() uint16 + + // EncodedBytes returns the encoded data. + EncodedBytes() []byte + + // EncodedLength is the size in bytes of the encoded data. + EncodedLength() uint16 + + // ReadFrom reads the next Field from r. + ReadFrom(r io.Reader) (int64, error) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go new file mode 100644 index 0000000000..02e5e695c3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + "math/big" + "math/bits" +) + +// An MPI is used to store the contents of a big integer, along with the bit +// length that was specified in the original input. This allows the MPI to be +// reserialized exactly. +type MPI struct { + bytes []byte + bitLength uint16 +} + +// NewMPI returns a MPI initialized with bytes. +func NewMPI(bytes []byte) *MPI { + for len(bytes) != 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + bitLength := uint16(0) + return &MPI{bytes, bitLength} + } + bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) + return &MPI{bytes, bitLength} +} + +// Bytes returns the decoded data. +func (m *MPI) Bytes() []byte { + return m.bytes +} + +// BitLength is the size in bits of the decoded data. +func (m *MPI) BitLength() uint16 { + return m.bitLength +} + +// EncodedBytes returns the encoded data. 
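+//
+// For example, NewMPI([]byte{0x01, 0xff}) has a bit length of 9, so its
+// encoded form is the four octets 00 09 01 ff: a two-octet big-endian bit
+// count followed by the magnitude, per RFC 4880, section 3.2.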
+func (m *MPI) EncodedBytes() []byte {
+	return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...)
+}
+
+// EncodedLength is the size in bytes of the encoded data.
+func (m *MPI) EncodedLength() uint16 {
+	return uint16(2 + len(m.bytes))
+}
+
+// ReadFrom reads into m the next MPI from r.
+func (m *MPI) ReadFrom(r io.Reader) (int64, error) {
+	var buf [2]byte
+	n, err := io.ReadFull(r, buf[0:])
+	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return int64(n), err
+	}
+
+	m.bitLength = uint16(buf[0])<<8 | uint16(buf[1])
+	m.bytes = make([]byte, (int(m.bitLength)+7)/8)
+
+	nn, err := io.ReadFull(r, m.bytes)
+	if err == io.EOF {
+		err = io.ErrUnexpectedEOF
+	}
+
+	// remove leading zero bytes from malformed GnuPG encoded MPIs:
+	// https://bugs.gnupg.org/gnupg/issue1853
+	// for _, b := range m.bytes {
+	// 	if b != 0 {
+	// 		break
+	// 	}
+	// 	m.bytes = m.bytes[1:]
+	// 	m.bitLength -= 8
+	// }
+
+	return int64(n) + int64(nn), err
+}
+
+// SetBig initializes m with the bits from n.
+func (m *MPI) SetBig(n *big.Int) *MPI {
+	m.bytes = n.Bytes()
+	m.bitLength = uint16(n.BitLen())
+	return m
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
new file mode 100644
index 0000000000..ee39fd6bbb
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
@@ -0,0 +1,88 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encoding
+
+import (
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+// OID is used to store a variable-length field with a one-octet size
+// prefix. See https://tools.ietf.org/html/rfc6637#section-9.
+type OID struct {
+	bytes []byte
+}
+
+const (
+	// maxOID is the maximum number of bytes in an OID.
+	maxOID = 254
+	// reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC
+	// specifies are reserved.
+	reservedOIDLength1 = 0
+	reservedOIDLength2 = 0xff
+)
+
+// NewOID returns an OID initialized with bytes.
+func NewOID(bytes []byte) *OID {
+	switch len(bytes) {
+	case reservedOIDLength1, reservedOIDLength2:
+		panic("encoding: NewOID argument length is reserved")
+	default:
+		if len(bytes) > maxOID {
+			panic("encoding: NewOID argument too large")
+		}
+	}
+
+	return &OID{
+		bytes: bytes,
+	}
+}
+
+// Bytes returns the decoded data.
+func (o *OID) Bytes() []byte {
+	return o.bytes
+}
+
+// BitLength is the size in bits of the decoded data.
+func (o *OID) BitLength() uint16 {
+	return uint16(len(o.bytes) * 8)
+}
+
+// EncodedBytes returns the encoded data.
+func (o *OID) EncodedBytes() []byte {
+	return append([]byte{byte(len(o.bytes))}, o.bytes...)
+}
+
+// EncodedLength is the size in bytes of the encoded data.
+func (o *OID) EncodedLength() uint16 {
+	return uint16(1 + len(o.bytes))
+}
+
+// ReadFrom reads into o the next OID from r.
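+//
+// For example, the NIST P-256 OID listed in curveInfo.go arrives on the wire
+// as the length octet 0x08 followed by the eight octets
+// 2a 86 48 ce 3d 03 01 07.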
+func (o *OID) ReadFrom(r io.Reader) (int64, error) { + var buf [1]byte + n, err := io.ReadFull(r, buf[:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + switch buf[0] { + case reservedOIDLength1, reservedOIDLength2: + return int64(n), errors.UnsupportedError("reserved for future extensions") + } + + o.bytes = make([]byte, buf[0]) + + nn, err := io.ReadFull(r, o.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + return int64(n) + int64(nn), err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go new file mode 100644 index 0000000000..0d2eb45b8e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -0,0 +1,375 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + goerrors "errors" + "io" + "math/big" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "golang.org/x/crypto/ed25519" +) + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + + // Generate a primary signing key + primaryPrivRaw, err := newSigner(config) + if err != nil { + return nil, err + } + primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) + if config != nil && config.V5Keys { + primary.UpgradeToV5() + } + + isPrimaryId := true + selfSignature := &packet.Signature{ + Version: primary.PublicKey.Version, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: primary.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + IssuerKeyId: &primary.PublicKey.KeyId, + IssuerFingerprint: primary.PublicKey.Fingerprint, + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + MDC: true, // true by default, see 5.8 vs. 5.14 + AEAD: config.AEAD() != nil, + V5Keys: config != nil && config.V5Keys, + } + + // Set the PreferredHash for the SelfSignature from the packet.Config. + // If it is not the must-implement algorithm from rfc4880bis, append that. + selfSignature.PreferredHash = []uint8{hashToHashId(config.Hash())} + if config.Hash() != crypto.SHA256 { + selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) + } + + // Likewise for DefaultCipher. + selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} + if config.Cipher() != packet.CipherAES128 { + selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) + } + + // And for DefaultMode. 
+ selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeEAX { + selfSignature.PreferredAEAD = append(selfSignature.PreferredAEAD, uint8(packet.AEADModeEAX)) + } + + // User ID binding signature + err = selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) + if err != nil { + return nil, err + } + + // Generate an encryption subkey + subPrivRaw, err := newDecrypter(config) + if err != nil { + return nil, err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + sub.PublicKey.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } + + // NOTE: No KeyLifetimeSecs here, but we will not return this subkey in EncryptionKey() + // if the primary/master key has expired. + subKey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: primary.PublicKey.Version, + CreationTime: creationTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: primary.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &primary.PublicKey.KeyId, + }, + } + + // Subkey binding signature + err = subKey.Sig.SignKey(subKey.PublicKey, primary, config) + if err != nil { + return nil, err + } + + return &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: map[string]*Identity{ + uid.Id: &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: selfSignature, + Signatures: []*packet.Signature{selfSignature}, + }, + }, + Subkeys: []Subkey{subKey}, + }, nil +} + +// AddSigningSubkey adds a signing keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. +func (e *Entity) AddSigningSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newSigner(config) + if err != nil { + return err + } + sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagSign: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + EmbeddedSignature: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + SigType: packet.SigTypePrimaryKeyBinding, + PubKeyAlgo: sub.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + }, + } + if config != nil && config.V5Keys { + subkey.PublicKey.UpgradeToV5() + } + + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) + if err != nil { + return err + } + + subkey.PublicKey.IsSubkey = true + subkey.PrivateKey.IsSubkey = true + if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. 
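+//
+// A minimal usage sketch, assuming e is an *Entity whose private key has been
+// decrypted:
+//
+//	if err := e.AddEncryptionSubkey(nil); err != nil {
+//		// handle error
+//	}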
+func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newDecrypter(config) + if err != nil { + return err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + if config != nil && config.V5Keys { + subkey.PublicKey.UpgradeToV5() + } + + subkey.PublicKey.IsSubkey = true + subkey.PrivateKey.IsSubkey = true + if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// Generates a signing key +func newSigner(config *packet.Config) (signer crypto.Signer, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + _, priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return &priv, nil + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +// Generates an encryption/decryption key +func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + fallthrough // When passing EdDSA, we generate an ECDH subkey + case packet.PubKeyAlgoECDH: + var kdf = ecdh.KDF{ + Hash: algorithm.SHA512, + Cipher: algorithm.AES256, + } + return ecdh.X25519GenerateKey(config.Random(), kdf) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +var bigOne = big.NewInt(1) + +// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +// given bit size, using the given random source and prepopulated primes. +func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { + priv := new(rsa.PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") + } + + if bits < 1024 { + return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). 
+ // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + var err error + if len(prepopulatedPrimes) == 0 { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + } else { + primes[i] = prepopulatedPrimes[0] + prepopulatedPrimes = prepopulatedPrimes[1:] + } + + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. + continue NextSetOfPrimes + } + + priv.D = new(big.Int) + e := big.NewInt(int64(priv.E)) + ok := priv.D.ModInverse(e, totient) + + if ok != nil { + priv.Primes = primes + priv.N = n + break + } + } + + priv.Precompute() + return priv, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go new file mode 100644 index 0000000000..c3fedf7a25 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -0,0 +1,707 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + goerrors "errors" + "io" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. 
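+//
+// A Key is usually obtained via Entity.EncryptionKey or Entity.SigningKey
+// (defined below) rather than constructed directly, e.g. (sketch):
+//
+//	if key, ok := e.EncryptionKey(time.Now()); ok {
+//		// encrypt to key.PublicKey
+//	}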
+type Key struct {
+	Entity        *Entity
+	PublicKey     *packet.PublicKey
+	PrivateKey    *packet.PrivateKey
+	SelfSignature *packet.Signature
+}
+
+// A KeyRing provides access to public and private keys.
+type KeyRing interface {
+	// KeysById returns the set of keys that have the given key id.
+	KeysById(id uint64) []Key
+	// KeysByIdUsage returns the set of keys with the given id
+	// that also meet the key usage given by requiredUsage.
+	// The requiredUsage is expressed as the bitwise-OR of
+	// packet.KeyFlag* values.
+	KeysByIdUsage(id uint64, requiredUsage byte) []Key
+	// DecryptionKeys returns all private keys that are valid for
+	// decryption.
+	DecryptionKeys() []Key
+}
+
+// PrimaryIdentity returns the Identity marked as primary or the first identity
+// if none are so marked.
+func (e *Entity) PrimaryIdentity() *Identity {
+	var firstIdentity *Identity
+	for _, ident := range e.Identities {
+		if firstIdentity == nil {
+			firstIdentity = ident
+		}
+		if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+			return ident
+		}
+	}
+	return firstIdentity
+}
+
+// EncryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
+	// Fail to find any encryption key if the primary key has expired.
+	i := e.PrimaryIdentity()
+	primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now)
+	if primaryKeyExpired {
+		return Key{}, false
+	}
+
+	// Iterate the keys to find the newest, unexpired one
+	candidateSubkey := -1
+	var maxTime time.Time
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagEncryptCommunications &&
+			subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+			!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+			candidateSubkey = i
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we don't have any candidate subkeys for encryption and
+	// the primary key doesn't have any usage metadata then we
+	// assume that the primary key is ok. Or, if the primary key is
+	// marked as ok to encrypt with, then we can obviously use it.
+	// Also, check expiry again just to be safe.
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
+		e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	return Key{}, false
+}
+
+// SigningKey returns the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) SigningKey(now time.Time) (Key, bool) {
+	return e.SigningKeyById(now, 0)
+}
+
+// SigningKeyById returns the Key for signing a message with this
+// Entity and keyID.
+func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
+	// Fail to find any signing key if the primary key has expired.
+	i := e.PrimaryIdentity()
+	primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now)
+	if primaryKeyExpired {
+		return Key{}, false
+	}
+
+	// Iterate the keys to find the newest, unexpired one
+	candidateSubkey := -1
+	var maxTime time.Time
+	for idx, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagSign &&
+			subkey.PublicKey.PubKeyAlgo.CanSign() &&
+			!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
+			(id == 0 || subkey.PrivateKey.KeyId == id) {
+			candidateSubkey = idx
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we have no candidate subkey then we assume that it's ok to sign
+	// with the primary key. Or, if the primary key is marked as ok to
+	// sign with, then we can use it. Also, check expiry again just to be safe.
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
+		e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired &&
+		(id == 0 || e.PrivateKey.KeyId == id) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	// No keys with a valid Signing Flag or no keys matched the id passed in
+	return Key{}, false
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
+func (el EntityList) KeysById(id uint64) (keys []Key) {
+	for _, e := range el {
+		if e.PrimaryKey.KeyId == id {
+			var selfSig *packet.Signature
+			for _, ident := range e.Identities {
+				if selfSig == nil {
+					selfSig = ident.SelfSignature
+				} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+					selfSig = ident.SelfSignature
+					break
+				}
+			}
+			keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
+		}
+
+		for _, subKey := range e.Subkeys {
+			if subKey.PublicKey.KeyId == id {
+				keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
+			}
+		}
+	}
+	return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
+	for _, key := range el.KeysById(id) {
+		if len(key.Entity.Revocations) > 0 {
+			continue
+		}
+
+		if key.SelfSignature.RevocationReason != nil {
+			continue
+		}
+
+		if key.SelfSignature.FlagsValid && requiredUsage != 0 {
+			var usage byte
+			if key.SelfSignature.FlagCertify {
+				usage |= packet.KeyFlagCertify
+			}
+			if key.SelfSignature.FlagSign {
+				usage |= packet.KeyFlagSign
+			}
+			if key.SelfSignature.FlagEncryptCommunications {
+				usage |= packet.KeyFlagEncryptCommunications
+			}
+			if key.SelfSignature.FlagEncryptStorage {
+				usage |= packet.KeyFlagEncryptStorage
+			}
+			if usage&requiredUsage != requiredUsage {
+				continue
+			}
+		}
+
+		keys = append(keys, key)
+	}
+	return
+}
+
+// DecryptionKeys returns all private keys that are valid for decryption.
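+//
+// A typical caller (sketch, with keyring naming an EntityList) walks the
+// returned slice and tries each PrivateKey in turn:
+//
+//	for _, k := range keyring.DecryptionKeys() {
+//		// attempt decryption with k.PrivateKey
+//	}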
+func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). 
+ } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + identity.SelfSignature = sig + } + identity.Signatures = append(identity.Signatures, sig) + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Sig = sig + case packet.SigTypeSubkeyBinding: + if shouldReplaceSubkeySig(subKey.Sig, sig) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { + if potentialNewSig == nil { + return false + } + + if existingSig == nil { + return true + } + + if existingSig.SigType == packet.SigTypeSubkeyRevocation { + return false // never override a revocation signature + } + + 
return potentialNewSig.CreationTime.After(existingSig.CreationTime) +} + +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntry. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + if e.PrivateKey.Dummy() { + return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities") + } + return e.serializePrivate(w, config, true) +} + +// SerializePrivateWithoutSigning serializes an Entity, including private key +// material, but excluding signatures from other entities, to the given Writer. +// Self-signatures of identities and subkeys are not re-signed. This is useful +// when serializing GNU dummy keys, among other things. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { + return e.serializePrivate(w, config, false) +} + +func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { + if e.PrivateKey == nil { + return goerrors.New("openpgp: private key is missing") + } + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if reSign { + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if reSign { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + if subkey.Sig.EmbeddedSignature != nil { + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, + subkey.PrivateKey, config) + if err != nil { + return + } + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. 
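+//
+// A minimal sketch, in which signer certifies the identity
+// "Bob <bob@example.com>" on the entity e (names are illustrative):
+//
+//	err := e.SignIdentity("Bob <bob@example.com>", signer, nil)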
+func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + Version: signer.PrivateKey.Version, + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} + +// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the +// specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. +func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + reasonCode := uint8(reason) + revSig := &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: config.Now(), + SigType: packet.SigTypeKeyRevocation, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + RevocationReason: &reasonCode, + RevocationReasonText: reasonText, + IssuerKeyId: &e.PrimaryKey.KeyId, + } + + if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { + return err + } + e.Revocations = append(e.Revocations, revSig) + return nil +} + +// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for +// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. 
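+//
+// A minimal sketch, revoking the first subkey of e as compromised
+// (packet.KeyCompromised is assumed to be among the packet.ReasonForRevocation
+// codes):
+//
+//	err := e.RevokeSubkey(&e.Subkeys[0], packet.KeyCompromised, "key material leaked", nil)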
+func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { + return errors.InvalidArgumentError("given subkey is not associated with this key") + } + + reasonCode := uint8(reason) + revSig := &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: config.Now(), + SigType: packet.SigTypeSubkeyRevocation, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + RevocationReason: &reasonCode, + RevocationReasonText: reasonText, + IssuerKeyId: &e.PrimaryKey.KeyId, + } + + if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + sk.Sig = revSig + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go new file mode 100644 index 0000000000..21d17c2f8b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -0,0 +1,336 @@ +package openpgp + +const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432
306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d
53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" + +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + 
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` + 
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e +DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ +uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW +ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx +nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ +x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg +PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy +9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ +1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 +depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl +aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 +DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa +XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU +8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 +b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD +BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG +0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N +s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb +tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 +BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE +/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 +kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z +VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa +PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ +snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi +bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 +K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X +8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ +TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb +OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l +QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V +yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U +heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB +7qTZOahrETw= +=IKnw +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR +s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL +EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ +lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 +2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 +69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 +Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b +wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW +FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR +AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM +vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx +22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj +7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY +AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 +tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX +JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl 
+mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 +8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC +0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b +4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl +Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY +=3fWu +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L +plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz +r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ +sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ +3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm +k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s +GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G +HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT +CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR +AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN +MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j +UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS +YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs +nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ +U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es +HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK +lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg +AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV +UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM +0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX +12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa +3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi +wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 +Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G +1dh1WoUCyREZsJQg2YoIpWIcvw+a +=bNRo +-----END PGP PUBLIC KEY BLOCK----- +` + +const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1 + +lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 +GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 +I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB +/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl +rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ +6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD +ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX +M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED +Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f +yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk +UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH +e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl +AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r +oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo +UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS +YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ +kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC 
+WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW +n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 +/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 +R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE +VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix +7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== +=F/T0 +-----END PGP PRIVATE KEY BLOCK-----` + +const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii +B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS +PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 +lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA +aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD +Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v +Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S +rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH +AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA +iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ +Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ +AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou +b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== +=KLN8 +-----END PGP PRIVATE KEY BLOCK-----` + +const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd +oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk +/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ +gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r +njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU +iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK +Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m +kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR +MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI +zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A +rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP +CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr +MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c +bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd +hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ +3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS +ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL +Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 +Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz +YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r +sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV +JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 +EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt +Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw +vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ +iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg +UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y +HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL +Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy +bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP +Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic 
+BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 +v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV +oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c +wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R +fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa +4g== +=XZm8 +-----END PGP PRIVATE KEY BLOCK-----` + +// https://tests.sequoia-pgp.org/#Certificate_expiration +// P _ U p +const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE +FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL +CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl +1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo +aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy +OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 +/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA +g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla +d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP +2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l +nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX +nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX +SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 +rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA +0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ +wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa +LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV +8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz +j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy +AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 +1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ +QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp +f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn +3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK +2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA +ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi +f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj +5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=522n +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go new file mode 100644 index 0000000000..7350974eff --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import "math/bits" + +// AEADConfig collects a number of AEAD parameters along with sensible defaults. 
+// A nil AEADConfig is valid and results in all default values.
+type AEADConfig struct {
+	// The AEAD mode of operation.
+	DefaultMode AEADMode
+	// Amount of octets in each chunk of data
+	ChunkSize uint64
+}
+
+// Mode returns the AEAD mode of operation.
+func (conf *AEADConfig) Mode() AEADMode {
+	if conf == nil || conf.DefaultMode == 0 {
+		return AEADModeEAX
+	}
+	mode := conf.DefaultMode
+	if mode != AEADModeEAX && mode != AEADModeOCB &&
+		mode != AEADModeExperimentalGCM {
+		panic("AEAD mode unsupported")
+	}
+	return mode
+}
+
+// ChunkSizeByte returns the byte indicating the chunk size. The effective
+// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6).
+func (conf *AEADConfig) ChunkSizeByte() byte {
+	if conf == nil || conf.ChunkSize == 0 {
+		return 12 // 1 << (12 + 6) == 262144 bytes
+	}
+
+	chunkSize := conf.ChunkSize
+	exponent := bits.Len64(chunkSize) - 1
+	switch {
+	case exponent < 6:
+		exponent = 6
+	case exponent > 27:
+		exponent = 27
+	}
+
+	return byte(exponent - 6)
+}
+
+// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the
+// maximum returned value is 1 << 30.
+func decodeAEADChunkSize(c byte) int {
+	size := uint64(1 << (c + 6))
+	if size != uint64(int(size)) {
+		return 1 << 30
+	}
+	return int(size)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
new file mode 100644
index 0000000000..85c53f08aa
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
@@ -0,0 +1,364 @@
+// Copyright (C) 2019 ProtonTech AG
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/rand"
+	"encoding/binary"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+)
+
+// AEADEncrypted represents an AEAD Encrypted Packet (tag 20, RFC4880bis-5.16).
+type AEADEncrypted struct {
+	cipher        CipherFunction
+	mode          AEADMode
+	chunkSizeByte byte
+	Contents      io.Reader // Encrypted chunks and tags
+	initialNonce  []byte    // Referred to as IV in RFC4880-bis
+}
+
+// Only currently defined version
+const aeadEncryptedVersion = 1
+
+// An AEAD opener/sealer, its configuration, and data for en/decryption.
+type aeadCrypter struct {
+	aead           cipher.AEAD
+	chunkSize      int
+	initialNonce   []byte
+	associatedData []byte       // Chunk-independent associated data
+	chunkIndex     []byte       // Chunk counter
+	bytesProcessed int          // Amount of plaintext bytes encrypted/decrypted
+	buffer         bytes.Buffer // Buffered bytes across chunks
+}
+
+// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
+// to the AEAD block size, and buffers the extra encrypted bytes for next write.
+type aeadEncrypter struct {
+	aeadCrypter                // Embedded plaintext sealer
+	writer      io.WriteCloser // 'writer' is a partialLengthWriter
+}
+
+// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
+// necessary, similar to aeadEncrypter.
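+// As a rough sketch of the framing it expects (derived from ChunkSizeByte and
+// decodeAEADChunkSize in aead_config.go above): a chunk-size byte c encodes a
+// plaintext chunk size of uint64(1) << (c + 6), so the default byte 12 yields
+// 1 << 18 == 262144-octet chunks, and the stream it reads is laid out as
+// chunk1||tag1, chunk2||tag2, ..., followed by a final authentication tag.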
+type aeadDecrypter struct { + aeadCrypter // Embedded ciphertext opener + reader io.Reader // 'reader' is a partialLengthReader + peekedBytes []byte // Used to detect last chunk + eof bool +} + +func (ae *AEADEncrypted) parse(buf io.Reader) error { + headerData := make([]byte, 4) + if n, err := io.ReadFull(buf, headerData); n < 4 { + return errors.AEADError("could not read aead header:" + err.Error()) + } + // Read initial nonce + mode := AEADMode(headerData[2]) + nonceLen := mode.NonceLength() + if nonceLen == 0 { + return errors.AEADError("unknown mode") + } + initialNonce := make([]byte, nonceLen) + if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { + return errors.AEADError("could not read aead nonce:" + err.Error()) + } + ae.Contents = buf + ae.initialNonce = initialNonce + c := headerData[1] + if _, ok := algorithm.CipherById[c]; !ok { + return errors.UnsupportedError("unknown cipher: " + string(c)) + } + ae.cipher = CipherFunction(c) + ae.mode = mode + ae.chunkSizeByte = byte(headerData[3]) + return nil +} + +// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or +// an error. +func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) { + return ae.decrypt(key) +} + +// decrypt prepares an aeadCrypter and returns a ReadCloser from which +// decrypted bytes can be read (see aeadDecrypter.Read()). +func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { + blockCipher := ae.cipher.new(key) + aead := ae.mode.new(blockCipher) + // Carry the first tagLen bytes + tagLen := ae.mode.TagLength() + peekedBytes := make([]byte, tagLen) + n, err := io.ReadFull(ae.Contents, peekedBytes) + if n < tagLen || (err != nil && err != io.EOF) { + return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) + } + chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) + return &aeadDecrypter{ + aeadCrypter: aeadCrypter{ + aead: aead, + chunkSize: chunkSize, + initialNonce: ae.initialNonce, + associatedData: ae.associatedData(), + chunkIndex: make([]byte, 8), + }, + reader: ae.Contents, + peekedBytes: peekedBytes}, nil +} + +// Read decrypts bytes and reads them into dst. It decrypts when necessary and +// buffers extra decrypted bytes. It returns the number of bytes copied into dst +// and an error. +func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { + // Return buffered plaintext bytes from previous calls + if ar.buffer.Len() > 0 { + return ar.buffer.Read(dst) + } + + // Return EOF if we've previously validated the final tag + if ar.eof { + return 0, io.EOF + } + + // Read a chunk + tagLen := ar.aead.Overhead() + cipherChunkBuf := new(bytes.Buffer) + _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen)) + cipherChunk := cipherChunkBuf.Bytes() + if errRead != nil && errRead != io.EOF { + return 0, errRead + } + decrypted, errChunk := ar.openChunk(cipherChunk) + if errChunk != nil { + return 0, errChunk + } + + // Return decrypted bytes, buffering if necessary + if len(dst) < len(decrypted) { + n = copy(dst, decrypted[:len(dst)]) + ar.buffer.Write(decrypted[len(dst):]) + } else { + n = copy(dst, decrypted) + } + + // Check final authentication tag + if errRead == io.EOF { + errChunk := ar.validateFinalTag(ar.peekedBytes) + if errChunk != nil { + return n, errChunk + } + ar.eof = true // Mark EOF for when we've returned all buffered data + } + return +} + +// Close is noOp. The final authentication tag of the stream was already +// checked in the last Read call. 
In the future, this function could be used to +// wipe the reader and peeked, decrypted bytes, if necessary. +func (ar *aeadDecrypter) Close() (err error) { + return nil +} + +// SerializeAEADEncrypted initializes the aeadCrypter and returns a writer. +// This writer encrypts and writes bytes (see aeadEncrypter.Write()). +func SerializeAEADEncrypted(w io.Writer, key []byte, cipher CipherFunction, mode AEADMode, config *Config) (io.WriteCloser, error) { + writeCloser := noOpCloser{w} + writer, err := serializeStreamHeader(writeCloser, packetTypeAEADEncrypted) + if err != nil { + return nil, err + } + + // Data for en/decryption: tag, version, cipher, aead mode, chunk size + aeadConf := config.AEAD() + prefix := []byte{ + 0xD4, + aeadEncryptedVersion, + byte(config.Cipher()), + byte(aeadConf.Mode()), + aeadConf.ChunkSizeByte(), + } + n, err := writer.Write(prefix[1:]) + if err != nil || n < 4 { + return nil, errors.AEADError("could not write AEAD headers") + } + // Sample nonce + nonceLen := aeadConf.Mode().NonceLength() + nonce := make([]byte, nonceLen) + n, err = rand.Read(nonce) + if err != nil { + panic("Could not sample random nonce") + } + _, err = writer.Write(nonce) + if err != nil { + return nil, err + } + blockCipher := CipherFunction(config.Cipher()).new(key) + alg := AEADMode(aeadConf.Mode()).new(blockCipher) + + chunkSize := decodeAEADChunkSize(aeadConf.ChunkSizeByte()) + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: alg, + chunkSize: chunkSize, + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + }, + writer: writer}, nil +} + +// Write encrypts and writes bytes. It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. + if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size, index, total number of encrypted octets). + adata := append(aw.associatedData[:], aw.chunkIndex[:]...) + adata = append(adata, make([]byte, 8)...) + binary.BigEndian.PutUint64(adata[13:], uint64(aw.bytesProcessed)) + nonce := aw.computeNextNonce() + finalTag := aw.aead.Seal(nil, nonce, nil, adata) + _, err = aw.writer.Write(finalTag) + if err != nil { + return err + } + return aw.writer.Close() +} + +// sealChunk Encrypts and authenticates the given chunk. 
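+// Roughly, each chunk is sealed as in this sketch (using only the fields
+// defined above; the body below additionally enforces the chunk-size limit):
+//
+//	adata := append(aw.associatedData, aw.chunkIndex...)
+//	nonce := aw.computeNextNonce()
+//	ciphertext := aw.aead.Seal(nil, nonce, plainChunk, adata)
+//
+// after which the chunk index is incremented for the next chunk.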
+func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
+	if len(data) > aw.chunkSize {
+		return nil, errors.AEADError("chunk exceeds maximum length")
+	}
+	if aw.associatedData == nil {
+		return nil, errors.AEADError("can't seal without headers")
+	}
+	adata := append(aw.associatedData, aw.chunkIndex...)
+	nonce := aw.computeNextNonce()
+	encrypted := aw.aead.Seal(nil, nonce, data, adata)
+	aw.bytesProcessed += len(data)
+	if err := aw.aeadCrypter.incrementIndex(); err != nil {
+		return nil, err
+	}
+	return encrypted, nil
+}
+
+// openChunk decrypts and checks integrity of an encrypted chunk, returning
+// the underlying plaintext and an error. It accesses peeked bytes from the
+// next chunk to identify the last chunk and decrypt/validate accordingly.
+func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
+	tagLen := ar.aead.Overhead()
+	// Restore carried bytes from last call
+	chunkExtra := append(ar.peekedBytes, data...)
+	// 'chunk' contains encrypted bytes, followed by an authentication tag.
+	chunk := chunkExtra[:len(chunkExtra)-tagLen]
+	ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
+	adata := append(ar.associatedData, ar.chunkIndex...)
+	nonce := ar.computeNextNonce()
+	plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
+	if err != nil {
+		return nil, err
+	}
+	ar.bytesProcessed += len(plainChunk)
+	if err = ar.aeadCrypter.incrementIndex(); err != nil {
+		return nil, err
+	}
+	return plainChunk, nil
+}
+
+// validateFinalTag checks the final (summary) tag. It takes the total number
+// of decrypted bytes into account in the associated data. It returns an
+// error, or nil if the tag is valid.
+func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
+	// Associated: tag, version, cipher, aead, chunk size, index, and octets
+	amountBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
+	adata := append(ar.associatedData, ar.chunkIndex...)
+	adata = append(adata, amountBytes...)
+	nonce := ar.computeNextNonce()
+	_, err := ar.aead.Open(nil, nonce, tag, adata)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Associated data for chunks: tag, version, cipher, mode, chunk size byte
+func (ae *AEADEncrypted) associatedData() []byte {
+	return []byte{
+		0xD4,
+		aeadEncryptedVersion,
+		byte(ae.cipher),
+		byte(ae.mode),
+		ae.chunkSizeByte}
+}
+
+// computeNextNonce takes the incremental index and computes an eXclusive OR
+// with the least significant 8 bytes of the receiver's initial nonce (see sec.
+// 5.16.1 and 5.16.2). It returns the resulting nonce.
+func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
+	nonce = make([]byte, len(wo.initialNonce))
+	copy(nonce, wo.initialNonce)
+	offset := len(wo.initialNonce) - 8
+	for i := 0; i < 8; i++ {
+		nonce[i+offset] ^= wo.chunkIndex[i]
+	}
+	return
+}
+
+// incrementIndex performs an integer increment by 1 of the integer represented
+// by the slice, modifying it accordingly.
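+// For example, with an 8-octet big-endian index (sketch):
+//
+//	[0 0 0 0 0 0 0 254] -> [0 0 0 0 0 0 0 255]
+//	[0 0 0 0 0 0 0 255] -> [0 0 0 0 0 0 1 0]
+//
+// Only an index in which every octet is already 255 fails to increment.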
+func (wo *aeadCrypter) incrementIndex() error {
+	index := wo.chunkIndex
+	if len(index) == 0 {
+		return errors.AEADError("Index has length 0")
+	}
+	for i := len(index) - 1; i >= 0; i-- {
+		if index[i] < 255 {
+			index[i]++
+			return nil
+		}
+		index[i] = 0
+	}
+	return errors.AEADError("cannot further increment index")
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
new file mode 100644
index 0000000000..c55cecd357
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"compress/bzip2"
+	"compress/flate"
+	"compress/zlib"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"io"
+	"strconv"
+)
+
+// Compressed represents a compressed OpenPGP packet. The decompressed contents
+// will contain more OpenPGP packets. See RFC 4880, section 5.6.
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 1:
+		c.Body = flate.NewReader(r)
+	case 2:
+		c.Body, err = zlib.NewReader(r)
+	case 3:
+		c.Body = bzip2.NewReader(r)
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
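+//
+// A minimal usage sketch (out is assumed to be an io.WriteCloser you already
+// hold; a nil cc selects DefaultCompression):
+//
+//	literal, err := SerializeCompressed(out, CompressionZLIB, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	// write literal data packets to literal, then:
+//	err = literal.Close()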
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
new file mode 100644
index 0000000000..5652dd8148
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
@@ -0,0 +1,167 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"math/big"
+	"time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time as the number of seconds since the
+	// epoch. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048 bit keys are created.
+	RSABits int
+	// The public key algorithm to use - will always create a signing primary
+	// key and encryption subkey.
+	Algorithm PublicKeyAlgorithm
+	// Some known primes that are optionally prepopulated by the caller
+	RSAPrimes []*big.Int
+	// AEADConfig configures the use of the new AEAD Encrypted Data Packet,
+	// defined in the draft of the next version of the OpenPGP specification.
+	// If a non-nil AEADConfig is passed, usage of this packet is enabled. By
+	// default, it is disabled. See the documentation of AEADConfig for more
+	// configuration options related to AEAD.
+	// **Note: using this option may break compatibility with other OpenPGP
+	// implementations, as well as future versions of this library.**
+	AEADConfig *AEADConfig
+	// V5Keys configures version 5 key generation. If false, this package still
+	// supports version 5 keys, but produces version 4 keys.
+	V5Keys bool
+	// "The validity period of the key. This is the number of seconds after
+	// the key creation time that the key expires. If this is not present
+	// or has a value of zero, the key never expires. This is found only on
+	// a self-signature."
+	// https://tools.ietf.org/html/rfc4880#section-5.2.3.6
+	KeyLifetimeSecs uint32
+	// "The validity period of the signature. This is the number of seconds
+	// after the signature creation time that the signature expires. If
+	// this is not present or has a value of zero, it never expires."
+	// https://tools.ietf.org/html/rfc4880#section-5.2.3.10
+	SigLifetimeSecs uint32
+	// SigningKeyId is used to specify the signing key to use (by Key ID).
+	// By default, the signing key is selected automatically, preferring
+	// signing subkeys if available.
+	SigningKeyId uint64
+}
+
+func (c *Config) Random() io.Reader {
+	if c == nil || c.Rand == nil {
+		return rand.Reader
+	}
+	return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+	if c == nil || uint(c.DefaultHash) == 0 {
+		return crypto.SHA256
+	}
+	return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+	if c == nil || uint8(c.DefaultCipher) == 0 {
+		return CipherAES128
+	}
+	return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+	if c == nil || c.Time == nil {
+		return time.Now()
+	}
+	return c.Time()
+}
+
+// KeyLifetime returns the validity period of the key.
+func (c *Config) KeyLifetime() uint32 {
+	if c == nil {
+		return 0
+	}
+	return c.KeyLifetimeSecs
+}
+
+// SigLifetime returns the validity period of the signature.
+func (c *Config) SigLifetime() uint32 {
+	if c == nil {
+		return 0
+	}
+	return c.SigLifetimeSecs
+}
+
+func (c *Config) Compression() CompressionAlgo {
+	if c == nil {
+		return CompressionNone
+	}
+	return c.DefaultCompressionAlgo
+}
+
+func (c *Config) PasswordHashIterations() int {
+	if c == nil || c.S2KCount == 0 {
+		return 0
+	}
+	return c.S2KCount
+}
+
+func (c *Config) RSAModulusBits() int {
+	if c == nil || c.RSABits == 0 {
+		return 2048
+	}
+	return c.RSABits
+}
+
+func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm {
+	if c == nil || c.Algorithm == 0 {
+		return PubKeyAlgoRSA
+	}
+	return c.Algorithm
+}
+
+func (c *Config) AEAD() *AEADConfig {
+	if c == nil {
+		return nil
+	}
+	return c.AEADConfig
+}
+
+func (c *Config) SigningKey() uint64 {
+	if c == nil {
+		return 0
+	}
+	return c.SigningKeyId
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
new file mode 100644
index 0000000000..801aec92b4
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
@@ -0,0 +1,282 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
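+
+// Note (a sketch, relying on the nil-tolerant getters in config.go above):
+// the functions in this file take a *Config and accept nil, falling back to
+// the documented defaults, e.g.
+//
+//	var cfg *Config  // nil is valid
+//	_ = cfg.Random() // the crypto/rand Reader
+//	_ = cfg.Cipher() // CipherAES128
+//	_ = cfg.Hash()   // crypto.SHA256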
+ +package packet + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 encoding.Field +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.MPI) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoECDH: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.OID) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + if e.KeyId != 0 && e.KeyId != priv.KeyId { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) + } + if e.Algo != priv.PubKeyAlgo { + return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. 
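+	// Whichever branch runs below, b is expected to decode as (a sketch of
+	// the session-key payload; see the checks after the switch):
+	//
+	//	b[0]            one octet naming the symmetric cipher (CipherFunc)
+	//	b[1:len(b)-2]   the session key itself
+	//	b[len(b)-2:]    a two-octet big-endian sum of the key octets
+	//	                (checksumKeyMaterial)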
+ switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + // Supports both *rsa.PrivateKey and crypto.Decrypter + k := priv.PrivateKey.(crypto.Decrypter) + b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + case PubKeyAlgoECDH: + vsG := e.encryptedMPI1.Bytes() + m := e.encryptedMPI2.Bytes() + oid := priv.PublicKey.oid.EncodedBytes() + b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) + default: + err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + case PubKeyAlgoElGamal: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoECDH: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + if err != nil { + return err + } + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + _, err := w.Write(e.encryptedMPI1.EncodedBytes()) + return err + case PubKeyAlgoElGamal: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + case PubKeyAlgoECDH: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + default: + panic("internal error") + } +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. 
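+//
+// A minimal call sketch (placeholder values):
+//
+//	err := SerializeEncryptedKey(w, pub, CipherAES128, sessionKey, nil)
+//
+// pub must be an encryption-capable key (RSA, ElGamal or ECDH); the session
+// key is wrapped together with a one-octet cipher identifier and a two-octet
+// checksum before being encrypted to pub.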
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + cipherMPI := encoding.NewMPI(cipherText) + packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + _, err = w.Write(cipherMPI.EncodedBytes()) + return err +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { + return err + } + _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) + return err +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { + vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) + if err != nil { + return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) + } + + g := encoding.NewMPI(vsG) + m := encoding.NewOID(c) + + packetLen := 10 /* header length */ + packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(g.EncodedBytes()); err != nil { + return err + } + _, err = w.Write(m.EncodedBytes()) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go new file mode 100644 index 0000000000..4be987609b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -0,0 +1,91 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents the contents of a literal data packet: the raw, +// uninterpreted body of a message. See RFC 4880, section 5.9. +type LiteralData struct { + Format uint8 + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. +func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.Format = buf[0] + l.IsBinary = l.Format == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go new file mode 100644 index 0000000000..4f26d0a00b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go @@ -0,0 +1,137 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point.
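+//
+// A minimal call sketch (placeholder names; error handling elided):
+//
+//	block, _ := aes.NewCipher(key)
+//	randData := make([]byte, block.BlockSize())
+//	io.ReadFull(rand.Reader, randData)
+//	stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync)
+//
+// The returned prefix (blockSize+2 bytes) must be written out before any
+// data encrypted with stream.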
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. On successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 0000000000..41c35de219 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
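+//
+// For orientation (summary, not normative): the fixed 13-octet body that
+// parse and Serialize below exchange is
+//
+//	version(1) | sig type(1) | hash algo(1) | pubkey algo(1) | key ID(8) | last flag(1)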
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go new file mode 100644 index 0000000000..e3879199e6 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. 
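+//
+// Typical iteration (a sketch; error handling abbreviated):
+//
+//	or := NewOpaqueReader(r)
+//	for {
+//		op, err := or.Next()
+//		if err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		// op.Tag and op.Contents hold the raw, unparsed packet.
+//	}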
+type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go new file mode 100644 index 0000000000..fe0da9d368 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,522 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" + +import ( + "bytes" + "crypto/cipher" + "crypto/rsa" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. 
+func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + buf bytes.Buffer + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + bufLen := w.buf.Len() + if bufLen > 512 { + for power := uint(30); ; power-- { + l := 1 << power + if bufLen >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(w.buf.Next(l)) + if err != nil { + return + } + if m != l { + return 0, io.ErrShortWrite + } + break + } + } + } + return w.buf.Write(p) +} + +func (w *partialLengthWriter) Close() (err error) { + len := w.buf.Len() + err = serializeLength(w.w, len) + if err != nil { + return err + } + _, err = w.buf.WriteTo(w.w) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
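+//
+// For orientation (summary, not normative): bit 7 of the first octet must
+// be set. Bit 6 selects the format: the old format keeps the tag in bits
+// 5..2 and a length-type in bits 1..0, while the new format keeps the tag
+// in bits 5..0 and encodes the length as in readLength.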
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + return serializeLength(w, length) +} + +// serializeType writes an OpenPGP packet type to w. See RFC 4880, section +// 4.2. +func serializeType(w io.Writer, ptype packetType) (err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + return +} + +// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section +// 4.2.2. +func serializeLength(w io.Writer, length int) (err error) { + var buf [5]byte + var n int + + if length < 192 { + buf[0] = byte(length) + n = 1 + } else if length < 8384 { + length -= 192 + buf[0] = 192 + byte(length>>8) + buf[1] = byte(length) + n = 2 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 + packetTypeAEADEncrypted packetType = 20 +) + +// EncryptedDataPacket holds encrypted data. It is currently implemented by +// SymmetricallyEncrypted and AEADEncrypted. +type EncryptedDataPacket interface { + Decrypt(CipherFunction, []byte) (io.ReadCloser, error) +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + isSubkey := tag == packetTypePublicSubkey + p = &PublicKey{IsSubkey: isSubkey} + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + err = errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + case packetTypeAEADEncrypted: + p = new(AEADEncrypted) + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0x00 + SigTypeText = 0x01 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. 
+ PubKeyAlgoECDH PublicKeyAlgorithm = 18 + PubKeyAlgoECDSA PublicKeyAlgorithm = 19 + // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt + PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 + + // Deprecated in RFC 4880, Section 13.5. Use key flags instead. + PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 + PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 +) + +// CanEncrypt returns true if it's possible to encrypt a message to a public +// key of the given type. +func (pka PublicKeyAlgorithm) CanEncrypt() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: + return true + } + return false +} + +// CanSign returns true if it's possible for a public key of the given type to +// sign a message. +func (pka PublicKeyAlgorithm) CanSign() bool { + switch pka { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + return true + } + return false +} + +// CipherFunction represents the different block ciphers specified for OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +type CipherFunction algorithm.CipherFunction + +const ( + Cipher3DES CipherFunction = 2 + CipherCAST5 CipherFunction = 3 + CipherAES128 CipherFunction = 7 + CipherAES192 CipherFunction = 8 + CipherAES256 CipherFunction = 9 +) + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + return algorithm.CipherFunction(cipher).KeySize() +} + +// blockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) blockSize() int { + return algorithm.CipherFunction(cipher).BlockSize() +} + +// new returns a fresh instance of the given cipher. +func (cipher CipherFunction) new(key []byte) (block cipher.Block) { + return algorithm.CipherFunction(cipher).New(key) +} + +// padToKeySize left-pads an MPI with zeroes to match the length of the +// specified RSA public key. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + +// CompressionAlgo represents the different compression algorithms +// supported by OpenPGP (except for BZIP2, which is not currently +// supported). See Section 9.3 of RFC 4880. +type CompressionAlgo uint8 + +const ( + CompressionNone CompressionAlgo = 0 + CompressionZIP CompressionAlgo = 1 + CompressionZLIB CompressionAlgo = 2 +) + +// AEADMode represents the different Authenticated Encryption with Associated +// Data modes specified for OpenPGP. +type AEADMode algorithm.AEADMode + +const ( + AEADModeEAX AEADMode = 1 + AEADModeOCB AEADMode = 2 + AEADModeExperimentalGCM AEADMode = 100 +) + +func (mode AEADMode) NonceLength() int { + return algorithm.AEADMode(mode).NonceLength() +} + +func (mode AEADMode) TagLength() int { + return algorithm.AEADMode(mode).TagLength() +} + +// new returns a fresh instance of the given mode. +func (mode AEADMode) new(block cipher.Block) cipher.AEAD { + return algorithm.AEADMode(mode).New(block) +} + +// ReasonForRevocation represents a revocation reason code as per RFC4880 +// section 5.2.3.23.
+type ReasonForRevocation uint8 + +const ( + NoReason ReasonForRevocation = 0 + KeySuperseded ReasonForRevocation = 1 + KeyCompromised ReasonForRevocation = 2 + KeyRetired ReasonForRevocation = 3 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go new file mode 100644 index 0000000000..854f9c4bbb --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -0,0 +1,780 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "fmt" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "golang.org/x/crypto/curve25519" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/ed25519" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or + // crypto.Signer/crypto.Decrypter (Decrypter is RSA-only). + PrivateKey interface{} + sha1Checksum bool + iv []byte + + // Type of encryption of the S2K packet + // Allowed values are 0 (Not encrypted), 254 (SHA1), or + // 255 (2-byte checksum) + s2kType S2KType + // Full parameters of the S2K packet + s2kParams *s2k.Params +} + +// S2KType is the S2K usage type found in a private key packet. +type S2KType uint8 + +const ( + // S2KNON means the key material is not encrypted + S2KNON S2KType = 0 + // S2KSHA1 means the key material is protected with a SHA-1 checksum + S2KSHA1 S2KType = 254 + // S2KCHECKSUM means the key material is protected with a two-octet checksum + S2KCHECKSUM S2KType = 255 +) + +func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEdDSAPrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pub := priv.Public().(ed25519.PublicKey) + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pub) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +//
NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA, ECDSA or EdDSA. +func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. + switch pubkey := signer.Public().(type) { + case *rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) + case *ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) + case *ed25519.PublicKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, pubkey) + case ed25519.PublicKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. +func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { + pk := new(PrivateKey) + switch priv := decrypter.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + case *elgamal.PrivateKey: + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + case *ecdh.PrivateKey: + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + default: + panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") + } + pk.PrivateKey = decrypter + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + v5 := pk.PublicKey.Version == 5 + + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.s2kType = S2KType(buf[0]) + var optCount [1]byte + if v5 { + if _, err = readFull(r, optCount[:]); err != nil { + return + } + } + + switch pk.s2kType { + case S2KNON: + pk.s2k = nil + pk.Encrypted = false + case S2KSHA1, S2KCHECKSUM: + if v5 && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError("wrong s2k identifier for version 5") + } + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.s2kParams, err = s2k.ParseIntoParams(r) + if err != nil { + return + } + if pk.s2kParams.Dummy() { + return + } + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return + } + pk.Encrypted = true + if pk.s2kType == S2KSHA1 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + var privateKeyData []byte + if v5 { + var n [4]byte /* secret material four octet count */ + _, err = readFull(r, n[:]) + if err != nil { + return + } + count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) + if !pk.Encrypted { + count = count + 2 /* two octet checksum */ + } + privateKeyData = make([]byte, count) + _, err = readFull(r, privateKeyData) + if err != nil { + return + } + } else { + privateKeyData, err = ioutil.ReadAll(r) + if err != nil { + return + } + } + if !pk.Encrypted { + return 
pk.parsePrivateKey(privateKeyData) + } + + pk.encryptedData = privateKeyData + return +} + +// Dummy returns true if the private key is a dummy key. This is a GNU extension. +func (pk *PrivateKey) Dummy() bool { + return pk.s2kParams.Dummy() +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + contents := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(contents) + if err != nil { + return + } + if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { + return + } + + optional := bytes.NewBuffer(nil) + if pk.Encrypted || pk.Dummy() { + optional.Write([]byte{uint8(pk.cipher)}) + if err := pk.s2kParams.Serialize(optional); err != nil { + return err + } + if pk.Encrypted { + optional.Write(pk.iv) + } + } + if pk.Version == 5 { + contents.Write([]byte{uint8(optional.Len())}) + } + io.Copy(contents, optional) + + if !pk.Dummy() { + l := 0 + var priv []byte + if !pk.Encrypted { + buf := bytes.NewBuffer(nil) + err = pk.serializePrivateKey(buf) + if err != nil { + return err + } + l = buf.Len() + if pk.sha1Checksum { + h := sha1.New() + h.Write(buf.Bytes()) + buf.Write(h.Sum(nil)) + } else { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } + priv = buf.Bytes() + } else { + priv, l = pk.encryptedData, len(pk.encryptedData) + } + + if pk.Version == 5 { + contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) + } + contents.Write(priv) + } + + ptype := packetTypePrivateKey + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, contents.Len()) + if err != nil { + return + } + _, err = io.Copy(w, contents) + if err != nil { + return + } + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { + return err + } + _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) + return err +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()) + return err +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + keySize := ed25519.PrivateKeySize - ed25519.PublicKeySize + _, err := w.Write(encoding.NewMPI((*priv)[:keySize]).EncodedBytes()) + return err +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.D).EncodedBytes()) + return err +} + +// Decrypt decrypts an encrypted private key using a passphrase. 
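+//
+// A minimal call sketch (placeholder passphrase):
+//
+//	if pk.Encrypted {
+//		if err := pk.Decrypt([]byte("passphrase")); err != nil {
+//			// wrong passphrase or corrupt key material
+//		}
+//	}
+//
+// Decrypt is a no-op for keys that are not encrypted and fails for GNU
+// dummy keys, whose secret material is absent by design.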
+func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + err := pk.parsePrivateKey(data) + if _, ok := err.(errors.KeyInvalidError); ok { + return errors.KeyInvalidError("invalid key parameters") + } + if err != nil { + return err + } + + // Mark key as unencrypted + pk.s2kType = S2KNON + pk.s2k = nil + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. +func (pk *PrivateKey) Encrypt(passphrase []byte) error { + priv := bytes.NewBuffer(nil) + err := pk.serializePrivateKey(priv) + if err != nil { + return err + } + + //Default config of private key encryption + pk.cipher = CipherAES256 + s2kConfig := &s2k.Config{ + S2KMode: 3, //Iterated + S2KCount: 65536, + Hash: crypto.SHA256, + } + + pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig) + if err != nil { + return err + } + privateKeyBytes := priv.Bytes() + key := make([]byte, pk.cipher.KeySize()) + + pk.sha1Checksum = true + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return err + } + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = rand.Read(pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + + if pk.sha1Checksum { + pk.s2kType = S2KSHA1 + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) 
+ } else { + pk.s2kType = S2KCHECKSUM + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) + } + + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + pk.Encrypted = true + pk.PrivateKey = nil + return err +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + return +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + p := new(encoding.MPI) + if _, err := p.ReadFrom(buf); err != nil { + return err + } + + q := new(encoding.MPI) + if _, err := q.ReadFrom(buf); err != nil { + return err + } + + rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) + if err := rsaPriv.Validate(); err != nil { + return errors.KeyInvalidError(err.Error()) + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateDSAParameters(dsaPriv); err != nil { + return err + } + pk.PrivateKey = dsaPriv + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + priv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateElGamalParameters(priv); err != nil { + return err + } + pk.PrivateKey = priv + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := new(ecdsa.PrivateKey) + ecdsaPriv.PublicKey = *ecdsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, 
err := d.ReadFrom(buf); err != nil { + return err + } + + ecdsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + if err := validateECDSAParameters(ecdsaPriv); err != nil { + return err + } + pk.PrivateKey = ecdsaPriv + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + ecdhPriv := new(ecdh.PrivateKey) + ecdhPriv.PublicKey = *ecdhPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + ecdhPriv.D = d.Bytes() + if err := validateECDHParameters(ecdhPriv); err != nil { + return err + } + pk.PrivateKey = ecdhPriv + + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPub := pk.PublicKey.PublicKey.(*ed25519.PublicKey) + eddsaPriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + priv := d.Bytes() + copy(eddsaPriv[32-len(priv):32], priv) + copy(eddsaPriv[32:], (*eddsaPub)[:]) + if err := validateEdDSAParameters(&eddsaPriv); err != nil { + return err + } + pk.PrivateKey = &eddsaPriv + + return nil +} + +func validateECDSAParameters(priv *ecdsa.PrivateKey) error { + return validateCommonECC(priv.Curve, priv.D.Bytes(), priv.X, priv.Y) +} + +func validateECDHParameters(priv *ecdh.PrivateKey) error { + if priv.CurveType != ecc.Curve25519 { + return validateCommonECC(priv.Curve, priv.D, priv.X, priv.Y) + } + // Handle Curve25519 + Q := priv.X.Bytes()[1:] + var d [32]byte + // Copy reversed d + l := len(priv.D) + for i := 0; i < l; i++ { + d[i] = priv.D[l-i-1] + } + var expectedQ [32]byte + curve25519.ScalarBaseMult(&expectedQ, &d) + if !bytes.Equal(Q, expectedQ[:]) { + return errors.KeyInvalidError("ECDH curve25519: invalid point") + } + return nil +} + +func validateCommonECC(curve elliptic.Curve, d []byte, X, Y *big.Int) error { + // the public point should not be at infinity (0,0) + zero := new(big.Int) + if X.Cmp(zero) == 0 && Y.Cmp(zero) == 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", curve.Params().Name)) + } + // re-derive the public point Q' = (X,Y) = dG + // to compare to declared Q in public key + expectedX, expectedY := curve.ScalarBaseMult(d) + if X.Cmp(expectedX) != 0 || Y.Cmp(expectedY) != 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", curve.Params().Name)) + } + return nil +} + +func validateEdDSAParameters(priv *ed25519.PrivateKey) error { + // In EdDSA, the serialized public point is stored as part of private key (together with the seed), + // hence we can re-derive the key from the seed + seed := priv.Seed() + expectedPriv := ed25519.NewKeyFromSeed(seed) + if !bytes.Equal(*priv, expectedPriv) { + return errors.KeyInvalidError("eddsa: invalid point") + } + return nil +} + +func validateDSAParameters(priv *dsa.PrivateKey) error { + p := priv.P // group prime + q := priv.Q // subgroup order + g := priv.G // g has order q mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("dsa: invalid group") + } + // expect p > q + if p.Cmp(q) <= 0 { + return errors.KeyInvalidError("dsa: invalid group prime") + } + // q should be large enough and divide p-1 + pSub1 := new(big.Int).Sub(p, one) + if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { + 
return errors.KeyInvalidError("dsa: invalid order") + } + // confirm that g has order q mod p + if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("dsa: mismatching values") + } + + return nil +} + +func validateElGamalParameters(priv *elgamal.PrivateKey) error { + p := priv.P // group prime + g := priv.G // g has order p-1 mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // Expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + if p.BitLen() < 1024 { + return errors.KeyInvalidError("elgamal: group order too small") + } + pSub1 := new(big.Int).Sub(p, one) + if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + // Since p-1 is not prime, g might have a smaller order that divides p-1. + // We cannot confirm the exact order of g, but we make sure it is not too small. + gExpI := new(big.Int).Set(g) + i := 1 + threshold := 2 << 17 // we want order > threshold + for i < threshold { + i++ // we check every order to make sure key validation is not easily bypassed by guessing y' + gExpI.Mod(new(big.Int).Mul(gExpI, g), p) + if gExpI.Cmp(one) == 0 { + return errors.KeyInvalidError("elgamal: order too small") + } + } + // Check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("elgamal: mismatching values") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go new file mode 100644 index 0000000000..029b8f1aab --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go @@ -0,0 +1,12 @@ +package packet + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. 
+const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. +// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 0000000000..28971c1ada --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,825 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "golang.org/x/crypto/ed25519" +) + +type kdfHashFunction byte +type kdfAlgorithm byte + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. 
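+//
+// For orientation (summary, not normative): a version 4 key carries a
+// 20-byte SHA-1 fingerprint whose low 64 bits form the KeyId, while a
+// version 5 key carries a 32-byte SHA-256 fingerprint whose first 8 bytes
+// form the KeyId; see setFingerprintAndKeyId.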
+type PublicKey struct { + Version int + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey + Fingerprint []byte + KeyId uint64 + IsSubkey bool + + // RFC 4880 fields + n, e, p, q, g, y encoding.Field + + // RFC 6637 fields + // oid contains the OID byte sequence identifying the elliptic curve used + oid encoding.Field + + // kdf stores key derivation function parameters + // used for ECDH encryption. See RFC 6637, Section 9. + kdf encoding.Field +} + +// UpgradeToV5 updates the version of the key to v5, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV5() { + pk.Version = 5 + pk.setFingerprintAndKeyId() +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeForHash(io.Writer) error + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: new(encoding.MPI).SetBig(pub.N), + e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + q: new(encoding.MPI).SetBig(pub.Q), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
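+//
+// A minimal usage sketch (illustrative only; assumes p, g and y already
+// form a valid ElGamal group):
+//
+//	pub := &elgamal.PublicKey{G: g, P: p, Y: y}
+//	pk := NewElGamalPublicKey(time.Now(), pub)
+//	fmt.Printf("%X\n", pk.Fingerprint)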
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + } + + curveInfo := ecc.FindByCurve(pub.Curve) + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + var pk *PublicKey + var curveInfo *ecc.CurveInfo + var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) + if pub.CurveType == ecc.Curve25519 { + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.X.Bytes()), + kdf: kdf, + } + curveInfo = ecc.FindByName("Curve25519") + } else { + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + kdf: kdf, + } + curveInfo = ecc.FindByCurve(pub.Curve) + } + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewEdDSAPublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + curveInfo := ecc.FindByName("Ed25519") + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEdDSA, + PublicKey: pub, + oid: curveInfo.Oid, + // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B + p: encoding.NewMPI(append([]byte{0x40}, *pub...)), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 { + return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) + } + + pk.Version = int(buf[0]) + if pk.Version == 5 { + var n [4]byte + _, err = readFull(r, n[:]) + if err != nil { + return + } + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + err = pk.parseECDSA(r) + case PubKeyAlgoECDH: + err = pk.parseECDH(r) + case PubKeyAlgoEdDSA: + err = pk.parseEdDSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerprintAndKeyId() + return +} + +func (pk *PublicKey) setFingerprintAndKeyId() { + // RFC 4880, section 12.2 + if pk.Version == 5 { + fingerprint := sha256.New() + pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 32) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) + } else { + fingerprint := sha1.New() + 
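+		// For v4 keys the fingerprint is the SHA-1 digest of the serialized
+		// public key material (with the 0x99 prefix written by
+		// SerializeSignaturePrefix), and the key ID is its low 64 bits.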
pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 20) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + } +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n = new(encoding.MPI) + if _, err = pk.n.ReadFrom(r); err != nil { + return + } + pk.e = new(encoding.MPI) + if _, err = pk.e.ReadFrom(r); err != nil { + return + } + + if len(pk.e.Bytes()) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.Bytes()), + E: 0, + } + for i := 0; i < len(pk.e.Bytes()); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.Bytes()[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.q = new(encoding.MPI) + if _, err = pk.q.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) + dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) + dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) + dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) + elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) + elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = elgamal + return +} + +// parseECDSA parses ECDSA public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + var c elliptic.Curve + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil || curveInfo.SigAlgorithm != ecc.ECDSA { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + c = curveInfo.Curve + x, y := elliptic.Unmarshal(c, pk.p.Bytes()) + if x == nil { + return errors.UnsupportedError("failed to parse EC point") + } + pk.PublicKey = &ecdsa.PublicKey{Curve: c, X: x, Y: y} + return +} + +// parseECDH parses ECDH public key material from the given Reader. See +// RFC 6637, Section 9. 
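+// The algorithm-specific fields are a curve OID, an MPI-encoded EC point,
+// and a KDF parameter block (a size octet, a reserved octet that must be
+// 0x01, a hash algorithm ID and a cipher algorithm ID), which is what the
+// checks below enforce.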
+func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
+	pk.oid = new(encoding.OID)
+	if _, err = pk.oid.ReadFrom(r); err != nil {
+		return
+	}
+	pk.p = new(encoding.MPI)
+	if _, err = pk.p.ReadFrom(r); err != nil {
+		return
+	}
+	pk.kdf = new(encoding.OID)
+	if _, err = pk.kdf.ReadFrom(r); err != nil {
+		return
+	}
+
+	curveInfo := ecc.FindByOid(pk.oid)
+	if curveInfo == nil {
+		return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
+	}
+
+	c := curveInfo.Curve
+	cType := curveInfo.CurveType
+
+	var x, y *big.Int
+	if cType == ecc.Curve25519 {
+		x = new(big.Int)
+		x.SetBytes(pk.p.Bytes())
+	} else {
+		x, y = elliptic.Unmarshal(c, pk.p.Bytes())
+	}
+	if x == nil {
+		return errors.UnsupportedError("failed to parse EC point")
+	}
+
+	if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 {
+		return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
+	}
+	if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 {
+		return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved)))
+	}
+	kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]]
+	if !ok {
+		return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1])))
+	}
+	kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]]
+	if !ok {
+		return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2])))
+	}
+
+	pk.PublicKey = &ecdh.PublicKey{
+		CurveType: cType,
+		Curve:     c,
+		X:         x,
+		Y:         y,
+		KDF: ecdh.KDF{
+			Hash:   kdfHash,
+			Cipher: kdfCipher,
+		},
+	}
+	return
+}
+
+func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
+	pk.oid = new(encoding.OID)
+	if _, err = pk.oid.ReadFrom(r); err != nil {
+		return
+	}
+	curveInfo := ecc.FindByOid(pk.oid)
+	if curveInfo == nil || curveInfo.SigAlgorithm != ecc.EdDSA {
+		return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
+	}
+	pk.p = new(encoding.MPI)
+	if _, err = pk.p.ReadFrom(r); err != nil {
+		return
+	}
+
+	eddsa := make(ed25519.PublicKey, ed25519.PublicKeySize)
+	switch flag := pk.p.Bytes()[0]; flag {
+	case 0x04:
+		// TODO: see _gcry_ecc_eddsa_ensure_compact in libgcrypt
+		return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
+	case 0x40:
+		copy(eddsa[:], pk.p.Bytes()[1:])
+	default:
+		return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
+	}
+
+	pk.PublicKey = &eddsa
+	return
+}
+
+// SerializeForHash serializes the PublicKey to w with the special packet
+// header format needed for hashing.
+func (pk *PublicKey) SerializeForHash(w io.Writer) error {
+	pk.SerializeSignaturePrefix(w)
+	return pk.serializeWithoutHeaders(w)
+}
+
+// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
+// The prefix is used when calculating a signature over this public key. See
+// RFC 4880, section 5.2.4.
+func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) {
+	var pLength = pk.algorithmSpecificByteCount()
+	if pk.Version == 5 {
+		pLength += 10 // version, timestamp (4), algorithm, key octet count (4).
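+		// v5 keys are hashed under a 0x9A prefix with a four-octet length
+		// field; v4 keys (below) use the 0x99 prefix with a two-octet
+		// length.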
+ w.Write([]byte{ + 0x9A, + byte(pLength >> 24), + byte(pLength >> 16), + byte(pLength >> 8), + byte(pLength), + }) + return + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + length += pk.algorithmSpecificByteCount() + if pk.Version == 5 { + length += 4 // octet key count + } + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +func (pk *PublicKey) algorithmSpecificByteCount() int { + length := 0 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += int(pk.n.EncodedLength()) + length += int(pk.e.EncodedLength()) + case PubKeyAlgoDSA: + length += int(pk.p.EncodedLength()) + length += int(pk.q.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoElGamal: + length += int(pk.p.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoECDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + case PubKeyAlgoECDH: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + length += int(pk.kdf.EncodedLength()) + case PubKeyAlgoEdDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + default: + panic("unknown public key algorithm") + } + return length +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + t := uint32(pk.CreationTime.Unix()) + if _, err = w.Write([]byte{ + byte(pk.Version), + byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), + byte(pk.PubKeyAlgo), + }); err != nil { + return + } + + if pk.Version == 5 { + n := pk.algorithmSpecificByteCount() + if _, err = w.Write([]byte{ + byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), + }); err != nil { + return + } + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + if _, err = w.Write(pk.n.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.e.EncodedBytes()) + return + case PubKeyAlgoDSA: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.q.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoElGamal: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoECDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoECDH: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.kdf.EncodedBytes()) + return + case PubKeyAlgoEdDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures 
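+// (encrypt-only RSA, ElGamal and ECDH keys cannot).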
+func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + eddsaPublicKey := pk.PublicKey.(*ed25519.PublicKey) + + sigR := sig.EdDSASigR.Bytes() + sigS := sig.EdDSASigS.Bytes() + + eddsaSig := make([]byte, ed25519.SignatureSize) + copy(eddsaSig[32-len(sigR):32], sigR) + copy(eddsaSig[64-len(sigS):], sigS) + + if !ed25519.Verify(*eddsaPublicKey, hashBytes, eddsaSig) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + if err != nil { + return nil, err + } + + err = signed.SerializeForHash(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. 
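+		// Without this embedded back-signature anyone could take an
+		// existing signing subkey and certify it with their own primary
+		// key, claiming the subkey's signatures as theirs.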
+ if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, +// made by the passed in signingKey. +func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return signingKey.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// BitLength returns the bit length for the given public key. 
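+// For the elliptic-curve algorithms this is the bit length of the encoded
+// public point (or EdDSA native point), not the nominal curve size.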
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.BitLength() + case PubKeyAlgoDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoElGamal: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDH: + bitLength = pk.p.BitLength() + case PubKeyAlgoEdDSA: + bitLength = pk.p.BitLength() + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired or is created in the future. +func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { + if pk.CreationTime.After(currentTime) { + return true + } + if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { + return false + } + expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go new file mode 100644 index 0000000000..b255f1f6f8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go @@ -0,0 +1,24 @@ +package packet + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" + +const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" + +const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" + +const eddsaPkDataHex = 
"98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go new file mode 100644 index 0000000000..94ae9ad9ac --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,78 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. 
+ if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go new file mode 100644 index 0000000000..6b1704e429 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,964 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + Version int + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + + // Metadata includes format, filename and time, and is protected by v5 + // signatures of type 0x00 or 0x01. This metadata is included into the hash + // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. + Metadata *LiteralData + + CreationTime time.Time + + RSASignature encoding.Field + DSASigR, DSASigS encoding.Field + ECDSASigR, ECDSASigS encoding.Field + EdDSASigR, EdDSASigS encoding.Field + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredAEAD []uint8 + IssuerKeyId *uint64 + IssuerFingerprint []byte + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. 
+	RevocationReason     *uint8
+	RevocationReasonText string
+
+	// In a self-signature, these flags are set if there is a features subpacket
+	// indicating that the issuer implementation supports these features
+	// (section 5.2.3.25).
+	MDC, AEAD, V5Keys bool
+
+	// EmbeddedSignature, if non-nil, is a signature of the parent key, by
+	// this key. This prevents an attacker from claiming another's signing
+	// subkey as their own.
+	EmbeddedSignature *Signature
+
+	outSubpackets []outputSubpacket
+}
+
+func (sig *Signature) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.2.3
+	var buf [5]byte
+	_, err = readFull(r, buf[:1])
+	if err != nil {
+		return
+	}
+	if buf[0] != 4 && buf[0] != 5 {
+		err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
+		return
+	}
+	sig.Version = int(buf[0])
+	_, err = readFull(r, buf[:5])
+	if err != nil {
+		return
+	}
+	sig.SigType = SignatureType(buf[0])
+	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
+	default:
+		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+		return
+	}
+
+	var ok bool
+	sig.Hash, ok = s2k.HashIdToHash(buf[2])
+	if !ok {
+		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
+	}
+
+	hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
+	hashedSubpackets := make([]byte, hashedSubpacketsLength)
+	_, err = readFull(r, hashedSubpackets)
+	if err != nil {
+		return
+	}
+	sig.buildHashSuffix(hashedSubpackets)
+	err = parseSignatureSubpackets(sig, hashedSubpackets, true)
+	if err != nil {
+		return
+	}
+
+	_, err = readFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+	unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
+	unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
+	_, err = readFull(r, unhashedSubpackets)
+	if err != nil {
+		return
+	}
+	err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
+	if err != nil {
+		return
+	}
+
+	_, err = readFull(r, sig.HashTag[:2])
+	if err != nil {
+		return
+	}
+
+	switch sig.PubKeyAlgo {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+		sig.RSASignature = new(encoding.MPI)
+		_, err = sig.RSASignature.ReadFrom(r)
+	case PubKeyAlgoDSA:
+		sig.DSASigR = new(encoding.MPI)
+		if _, err = sig.DSASigR.ReadFrom(r); err != nil {
+			return
+		}
+
+		sig.DSASigS = new(encoding.MPI)
+		_, err = sig.DSASigS.ReadFrom(r)
+	case PubKeyAlgoECDSA:
+		sig.ECDSASigR = new(encoding.MPI)
+		if _, err = sig.ECDSASigR.ReadFrom(r); err != nil {
+			return
+		}
+
+		sig.ECDSASigS = new(encoding.MPI)
+		_, err = sig.ECDSASigS.ReadFrom(r)
+	case PubKeyAlgoEdDSA:
+		sig.EdDSASigR = new(encoding.MPI)
+		if _, err = sig.EdDSASigR.ReadFrom(r); err != nil {
+			return
+		}
+
+		sig.EdDSASigS = new(encoding.MPI)
+		if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
+			return
+		}
+	default:
+		panic("unreachable")
+	}
+	return
+}
+
+// parseSignatureSubpackets parses subpackets of the main signature packet. See
+// RFC 4880, section 5.2.3.1.
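+//
+// Each subpacket starts with a length encoded in one, two or five octets
+// (RFC 4880, section 5.2.3.1), followed by a type octet and the body. For
+// example, a four-octet creation-time subpacket is framed as (illustrative
+// values):
+//
+//	0x05 0x02 t3 t2 t1 t0   // length 5, type 2, big-endian timestamp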
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprintSubpacket signatureSubpacketType = 33 + prefAeadAlgosSubpacket signatureSubpacketType = 34 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, 
len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + if sig.Version > 4 { + err = errors.StructuralError("issuer subpacket found in v5 key") + } + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. + if !isHashed { + return + } + if len(subpacket) > 0 { + if subpacket[0]&0x01 != 0 { + sig.MDC = true + } + if subpacket[0]&0x02 != 0 { + sig.AEAD = true + } + if subpacket[0]&0x04 != 0 { + sig.V5Keys = true + } + } + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. 
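+		// The embedded signature must be of type primary-key binding
+		// (0x19); this is checked below so that an arbitrary signature
+		// cannot stand in for the cross-certification.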
+		if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
+			return nil, err
+		}
+		if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
+			return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
+		}
+	case issuerFingerprintSubpacket:
+		v, l := subpacket[0], len(subpacket[1:])
+		if v == 5 && l != 32 || v != 5 && l != 20 {
+			return nil, errors.StructuralError("bad fingerprint length")
+		}
+		sig.IssuerFingerprint = make([]byte, l)
+		copy(sig.IssuerFingerprint, subpacket[1:])
+		sig.IssuerKeyId = new(uint64)
+		if v == 5 {
+			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
+		} else {
+			*sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
+		}
+	case prefAeadAlgosSubpacket:
+		// Preferred AEAD algorithms, section 5.2.3.8
+		if !isHashed {
+			return
+		}
+		sig.PreferredAEAD = make([]byte, len(subpacket))
+		copy(sig.PreferredAEAD, subpacket)
+	default:
+		if isCritical {
+			err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
+			return
+		}
+	}
+	return
+
+Truncated:
+	err = errors.StructuralError("signature subpacket truncated")
+	return
+}
+
+// subpacketLengthLength returns the length, in bytes, of an encoded length value.
+func subpacketLengthLength(length int) int {
+	if length < 192 {
+		return 1
+	}
+	if length < 16320 {
+		return 2
+	}
+	return 5
+}
+
+func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
+	if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 {
+		return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint)
+	}
+	return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
+}
+
+// serializeSubpacketLength marshals the given length into to.
+func serializeSubpacketLength(to []byte, length int) int {
+	// RFC 4880, Section 4.2.2.
+	if length < 192 {
+		to[0] = byte(length)
+		return 1
+	}
+	if length < 16320 {
+		length -= 192
+		to[0] = byte((length >> 8) + 192)
+		to[1] = byte(length)
+		return 2
+	}
+	to[0] = 255
+	to[1] = byte(length >> 24)
+	to[2] = byte(length >> 16)
+	to[3] = byte(length >> 8)
+	to[4] = byte(length)
+	return 5
+}
+
+// subpacketsLength returns the serialized length, in bytes, of the given
+// subpackets.
+func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
+	for _, subpacket := range subpackets {
+		if subpacket.hashed == hashed {
+			length += subpacketLengthLength(len(subpacket.contents) + 1)
+			length += 1 // type byte
+			length += len(subpacket.contents)
+		}
+	}
+	return
+}
+
+// serializeSubpackets marshals the given subpackets into to.
+func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
+	for _, subpacket := range subpackets {
+		if subpacket.hashed == hashed {
+			n := serializeSubpacketLength(to, len(subpacket.contents)+1)
+			to[n] = byte(subpacket.subpacketType)
+			to = to[1+n:]
+			n = copy(to, subpacket.contents)
+			to = to[n:]
+		}
+	}
+	return
+}
+
+// SigExpired returns whether sig is a signature that has expired or is created
+// in the future.
+func (sig *Signature) SigExpired(currentTime time.Time) bool {
+	if sig.CreationTime.After(currentTime) {
+		return true
+	}
+	if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 {
+		return false
+	}
+	expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second)
+	return currentTime.After(expiry)
+}
+
+// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
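+// For a v4 signature the suffix is the version, type, algorithm and hash
+// octets, the two-octet hashed-subpacket length, the hashed subpackets
+// themselves, and a trailer of 0x04 0xFF plus a four-octet count of the
+// hashed bytes; v5 signatures use an 0x05 0xFF trailer with an eight-octet
+// count instead.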
+func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { + hash, ok := s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + + hashedFields := bytes.NewBuffer([]byte{ + uint8(sig.Version), + uint8(sig.SigType), + uint8(sig.PubKeyAlgo), + uint8(hash), + uint8(len(hashedSubpackets) >> 8), + uint8(len(hashedSubpackets)), + }) + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(6 + len(hashedSubpackets)) + if sig.Version == 5 { + hashedFields.Write([]byte{0x05, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } else { + hashedFields.Write([]byte{0x04, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } + sig.HashSuffix = make([]byte, hashedFields.Len()) + copy(sig.HashSuffix, hashedFields.Bytes()) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + hashedSubpackets := make([]byte, hashedSubpacketsLen) + serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + sig.Version = priv.PublicKey.Version + sig.IssuerFingerprint = priv.PublicKey.Fingerprint + sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) + if err != nil { + return err + } + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + sig.RSASignature = encoding.NewMPI(sigdata) + } + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
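+		// e.g. with a 160-bit q, only the first 20 octets of a 32-octet
+		// SHA-256 digest are signed.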
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR = new(encoding.MPI).SetBig(r) + sig.DSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + // direct support, avoid asn1 wrapping/unwrapping + r, s, err = ecdsa.Sign(config.Random(), pk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + if err == nil { + sig.ECDSASigR = new(encoding.MPI).SetBig(r) + sig.ECDSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoEdDSA: + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, crypto.Hash(0)) + if err == nil { + sig.EdDSASigR = encoding.NewMPI(sigdata[:32]) + sig.EdDSASigS = encoding.NewMPI(sigdata[32:]) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA +// signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, +// the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, + config *Config) error { + h, err := keySignatureHash(hashKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, signingKey, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeKey computes a revocation signature of pub using priv. On success, the signature is +// stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keyRevocationHash(pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
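+//
+// A minimal signing sketch (illustrative only; assumes priv and the
+// message hash h are already prepared, and elides error handling):
+//
+//	sig := &Signature{
+//		SigType:      SigTypeBinary,
+//		PubKeyAlgo:   priv.PubKeyAlgo,
+//		Hash:         crypto.SHA256,
+//		CreationTime: time.Now(),
+//	}
+//	_ = sig.Sign(h, priv, nil)
+//	_ = sig.Serialize(&buf)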
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = int(sig.RSASignature.EncodedLength()) + case PubKeyAlgoDSA: + sigLength = int(sig.DSASigR.EncodedLength()) + sigLength += int(sig.DSASigS.EncodedLength()) + case PubKeyAlgoECDSA: + sigLength = int(sig.ECDSASigR.EncodedLength()) + sigLength += int(sig.ECDSASigS.EncodedLength()) + case PubKeyAlgoEdDSA: + sigLength = int(sig.EdDSASigR.EncodedLength()) + sigLength += int(sig.EdDSASigS.EncodedLength()) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 5 { + length -= 4 // eight-octet instead of four-octet big endian + } + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + err = sig.serializeBody(w) + if err != nil { + return err + } + return +} + +func (sig *Signature) serializeBody(w io.Writer) (err error) { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) + fields := sig.HashSuffix[:6+hashedSubpacketsLen] + _, err = w.Write(fields) + if err != nil { + return + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + _, err = w.Write(sig.RSASignature.EncodedBytes()) + case PubKeyAlgoDSA: + if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.DSASigS.EncodedBytes()) + case PubKeyAlgoECDSA: + if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.ECDSASigS.EncodedBytes()) + case PubKeyAlgoEdDSA: + if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. 
+ subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + } + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, true, contents}) + } + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. + + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures. + + var features = byte(0x00) + if sig.MDC { + features |= 0x01 + } + if sig.AEAD { + features |= 0x02 + } + if sig.V5Keys { + features |= 0x04 + } + + if features != 0x00 { + subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) + } + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if len(sig.PreferredAEAD) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) + } + + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. + if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) + } + + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
+	if sig.EmbeddedSignature != nil {
+		var buf bytes.Buffer
+		err = sig.EmbeddedSignature.serializeBody(&buf)
+		if err != nil {
+			return
+		}
+		subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()})
+	}
+
+	return
+}
+
+// AddMetadataToHashSuffix modifies the current hash suffix to include metadata
+// (format, filename, and time). Version 5 keys protect this data by including
+// it in the hash computation. See section 5.2.4.
+func (sig *Signature) AddMetadataToHashSuffix() {
+	if sig == nil || sig.Version != 5 {
+		return
+	}
+	if sig.SigType != 0x00 && sig.SigType != 0x01 {
+		return
+	}
+	lit := sig.Metadata
+	if lit == nil {
+		// This will translate into six 0x00 bytes.
+		lit = &LiteralData{}
+	}
+
+	// Extract the current byte count
+	n := sig.HashSuffix[len(sig.HashSuffix)-8:]
+	l := uint64(
+		uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
+			uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
+
+	suffix := bytes.NewBuffer(nil)
+	suffix.Write(sig.HashSuffix[:l])
+
+	// Add the metadata. Note that the truncated fileName is written so that
+	// the length octet always matches the bytes that follow it.
+	var buf [4]byte
+	buf[0] = lit.Format
+	fileName := lit.FileName
+	if len(lit.FileName) > 255 {
+		fileName = fileName[:255]
+	}
+	buf[1] = byte(len(fileName))
+	suffix.Write(buf[:2])
+	suffix.Write([]byte(fileName))
+	binary.BigEndian.PutUint32(buf[:], lit.Time)
+	suffix.Write(buf[:])
+
+	// Update the counter and restore trailing bytes
+	l = uint64(suffix.Len())
+	suffix.Write([]byte{0x05, 0xff})
+	suffix.Write([]byte{
+		uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
+		uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
+	})
+	sig.HashSuffix = suffix.Bytes()
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
new file mode 100644
index 0000000000..0e9f51d51b
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -0,0 +1,267 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/s2k"
+)
+
+// This is the largest session key that we'll support. Since no 512-bit cipher
+// has ever been seriously used, this is comfortably large.
+const maxSessionKeySizeInBytes = 64
+
+// SymmetricKeyEncrypted represents a passphrase-protected session key. See RFC
+// 4880, section 5.3.
+type SymmetricKeyEncrypted struct {
+	Version      int
+	CipherFunc   CipherFunction
+	Mode         AEADMode
+	s2k          func(out, in []byte)
+	aeadNonce    []byte
+	encryptedKey []byte
+}
+
+func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
+	// RFC 4880, section 5.3.
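+	// A version 4 packet is: a version octet, a cipher-algorithm octet, an
+	// S2K specifier and an optional encrypted session key; version 5 adds an
+	// AEAD-mode octet and a nonce (both parsed below).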
+	var buf [2]byte
+	if _, err := readFull(r, buf[:]); err != nil {
+		return err
+	}
+	ske.Version = int(buf[0])
+	if ske.Version != 4 && ske.Version != 5 {
+		return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
+	}
+	ske.CipherFunc = CipherFunction(buf[1])
+	if ske.CipherFunc.KeySize() == 0 {
+		return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
+	}
+
+	if ske.Version == 5 {
+		mode := make([]byte, 1)
+		if _, err := r.Read(mode); err != nil {
+			return errors.StructuralError("cannot read AEAD octet from packet")
+		}
+		ske.Mode = AEADMode(mode[0])
+	}
+
+	var err error
+	if ske.s2k, err = s2k.Parse(r); err != nil {
+		if _, ok := err.(errors.ErrDummyPrivateKey); ok {
+			return errors.UnsupportedError("missing key GNU extension in session key")
+		}
+		return err
+	}
+
+	if ske.Version == 5 {
+		// AEAD nonce
+		nonce := make([]byte, ske.Mode.NonceLength())
+		_, err := readFull(r, nonce)
+		if err != nil && err != io.ErrUnexpectedEOF {
+			return err
+		}
+		ske.aeadNonce = nonce
+	}
+
+	encryptedKey := make([]byte, maxSessionKeySizeInBytes)
+	// The session key may follow. We just have to try to read to find
+	// out. If it exists then we limit it to maxSessionKeySizeInBytes.
+	n, err := readFull(r, encryptedKey)
+	if err != nil && err != io.ErrUnexpectedEOF {
+		return err
+	}
+
+	if n != 0 {
+		if n == maxSessionKeySizeInBytes {
+			return errors.UnsupportedError("oversized encrypted session key")
+		}
+		ske.encryptedKey = encryptedKey[:n]
+	}
+	return nil
+}
+
+// Decrypt attempts to decrypt an encrypted session key and returns the key and
+// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
+// packet.
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
+	key := make([]byte, ske.CipherFunc.KeySize())
+	ske.s2k(key, passphrase)
+	if len(ske.encryptedKey) == 0 {
+		return key, ske.CipherFunc, nil
+	}
+	switch ske.Version {
+	case 4:
+		plaintextKey, cipherFunc, err := ske.decryptV4(key)
+		return plaintextKey, cipherFunc, err
+	case 5:
+		plaintextKey, err := ske.decryptV5(key)
+		return plaintextKey, CipherFunction(0), err
+	}
+	err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
+	return nil, CipherFunction(0), err
+}
+
+func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) {
+	// the IV is all zeros
+	iv := make([]byte, ske.CipherFunc.blockSize())
+	c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
+	plaintextKey := make([]byte, len(ske.encryptedKey))
+	c.XORKeyStream(plaintextKey, ske.encryptedKey)
+	cipherFunc := CipherFunction(plaintextKey[0])
+	if cipherFunc.blockSize() == 0 {
+		return nil, ske.CipherFunc, errors.UnsupportedError(
+			"unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+	}
+	plaintextKey = plaintextKey[1:]
+	if len(plaintextKey) != cipherFunc.KeySize() {
+		return nil, cipherFunc, errors.StructuralError(
+			"length of decrypted key not equal to cipher keysize")
+	}
+	return plaintextKey, cipherFunc, nil
+}
+
+func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) {
+	blockCipher := ske.CipherFunc.new(key)
+	aead := ske.Mode.new(blockCipher)
+
+	adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)}
+	plaintextKey, err := aead.Open(nil, ske.aeadNonce, ske.encryptedKey, adata)
+	if err != nil {
+		return nil, err
+	}
+	return plaintextKey, nil
+}
+
+// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
+// The packet contains a random session key, encrypted by a key derived from +// the given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on +// whether config.AEADConfig != nil. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + + err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) + if err != nil { + return + } + + key = sessionKey + return +} + +// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. +// The packet contains the given session key, encrypted by a key derived from +// the given passphrase. The session key must be passed to +// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on +// whether config.AEADConfig != nil. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + var version int + if config.AEAD() != nil { + version = 5 + } else { + version = 4 + } + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
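+	// With the default iterated-and-salted S2K, for example, the descriptor
+	// is a type octet (0x03), a hash-algorithm octet, an 8-octet salt and a
+	// one-octet encoded iteration count (RFC 4880, section 3.7.1.3).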
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + var packetLength int + switch version { + case 4: + packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + case 5: + nonceLen := config.AEAD().Mode().NonceLength() + tagLen := config.AEAD().Mode().TagLength() + packetLength = 3 + len(s2kBytes) + nonceLen + keySize + tagLen + } + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + buf := make([]byte, 2) + // Symmetric Key Encrypted Version + buf[0] = byte(version) + // Cipher function + buf[1] = byte(cipherFunc) + + if version == 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + switch version { + case 4: + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + case 5: + blockCipher := cipherFunc.new(keyEncryptingKey) + mode := config.AEAD().Mode() + aead := mode.new(blockCipher) + // Sample nonce using random reader + nonce := make([]byte, config.AEAD().Mode().NonceLength()) + _, err = io.ReadFull(config.Random(), nonce) + if err != nil { + return + } + // Seal and write (encryptedData includes auth. tag) + adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} + encryptedData := aead.Seal(nil, nonce, sessionKey, adata) + _, err = w.Write(nonce) + if err != nil { + return + } + _, err = w.Write(encryptedData) + if err != nil { + return + } + } + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 0000000000..8b84de177f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + Contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.Contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted Contents of the +// packet can be read. 
An incorrect key will only be detected after trying +// to decrypt the entire data. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.Contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + + plaintext := cipher.StreamReader{S: s, R: se.Contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous Contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. 
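+// (0x80 marks the octet as a packet tag, 0x40 selects the new packet format,
+// and 19 is the MDC packet type, so the constant below evaluates to 0xd3.)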
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19
+
+func (ser *seMDCReader) Close() error {
+	if ser.error {
+		return errors.ErrMDCMissing
+	}
+
+	for !ser.eof {
+		// We haven't seen EOF so we need to read to the end
+		var buf [1024]byte
+		_, err := ser.Read(buf[:])
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return errors.ErrMDCMissing
+		}
+	}
+
+	ser.h.Write(ser.trailer[:2])
+
+	final := ser.h.Sum(nil)
+	if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
+		return errors.ErrMDCHashMismatch
+	}
+	// The hash already includes the MDC header, but we still check its value
+	// to confirm encryption correctness
+	if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
+		return errors.ErrMDCMissing
+	}
+	return nil
+}
+
+// An seMDCWriter writes through to an io.WriteCloser while maintaining a
+// running hash of the data written. On close, it emits an MDC packet
+// containing the running hash.
+type seMDCWriter struct {
+	w io.WriteCloser
+	h hash.Hash
+}
+
+func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
+	w.h.Write(buf)
+	return w.w.Write(buf)
+}
+
+func (w *seMDCWriter) Close() (err error) {
+	var buf [mdcTrailerSize]byte
+
+	buf[0] = mdcPacketTagByte
+	buf[1] = sha1.Size
+	w.h.Write(buf[:2])
+	digest := w.h.Sum(nil)
+	copy(buf[2:], digest)
+
+	_, err = w.w.Write(buf[:])
+	if err != nil {
+		return
+	}
+	return w.w.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+type noOpCloser struct {
+	w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+	return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+	return nil
+}
+
+// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
+// to w and returns a WriteCloser to which the to-be-encrypted packets can be
+// written.
+// If config is nil, sensible defaults will be used.
+func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
+	if c.KeySize() != len(key) {
+		return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
+	}
+	writeCloser := noOpCloser{w}
+	ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
+	if err != nil {
+		return
+	}
+
+	_, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
+	if err != nil {
+		return
+	}
+
+	block := c.new(key)
+	blockSize := block.BlockSize()
+	iv := make([]byte, blockSize)
+	_, err = config.Random().Read(iv)
+	if err != nil {
+		return
+	}
+	s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
+	_, err = ciphertext.Write(prefix)
+	if err != nil {
+		return
+	}
+	plaintext := cipher.StreamWriter{S: s, W: ciphertext}
+
+	h := sha1.New()
+	h.Write(iv)
+	h.Write(iv[blockSize-2:])
+	Contents = &seMDCWriter{w: plaintext, h: h}
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
new file mode 100644
index 0000000000..0f760ade2c
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
@@ -0,0 +1,94 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"bytes"
+	"image"
+	"image/jpeg"
+	"io"
+	"io/ioutil"
+)
+
+const UserAttrImageSubpacket = 1
+
+// UserAttribute is capable of storing other types of data about a user
+// beyond name, email and a text comment. In practice, user attributes are
+// typically used to store a signed thumbnail photo (JPEG image) of the user.
+// See RFC 4880, section 5.12.
+type UserAttribute struct {
+	Contents []*OpaqueSubpacket
+}
+
+// NewUserAttributePhoto creates a user attribute packet
+// containing the given images.
+func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
+	uat = new(UserAttribute)
+	for _, photo := range photos {
+		var buf bytes.Buffer
+		// RFC 4880, Section 5.12.1.
+		data := []byte{
+			0x10, 0x00, // Little-endian image header length (16 bytes)
+			0x01,       // Image header version 1
+			0x01,       // JPEG
+			0, 0, 0, 0, // 12 reserved octets, must be all zero.
+			0, 0, 0, 0,
+			0, 0, 0, 0}
+		if _, err = buf.Write(data); err != nil {
+			return
+		}
+		if err = jpeg.Encode(&buf, photo, nil); err != nil {
+			return
+		}
+		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
+			SubType:  UserAttrImageSubpacket,
+			Contents: buf.Bytes()})
+	}
+	return
+}
+
+// NewUserAttribute creates a new user attribute packet containing the given subpackets.
+func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
+	return &UserAttribute{Contents: contents}
+}
+
+func (uat *UserAttribute) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.12
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uat.Contents, err = OpaqueSubpackets(b)
+	return
+}
+
+// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
+// header.
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
+	var buf bytes.Buffer
+	for _, sp := range uat.Contents {
+		err = sp.Serialize(&buf)
+		if err != nil {
+			return err
+		}
+	}
+	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
+		return err
+	}
+	_, err = w.Write(buf.Bytes())
+	return
+}
+
+// ImageData returns zero or more byte slices, each containing
+// JPEG File Interchange Format (JFIF), for each photo in the
+// user attribute packet.
+func (uat *UserAttribute) ImageData() (imageData [][]byte) {
+	for _, sp := range uat.Contents {
+		if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
+			imageData = append(imageData, sp.Contents[16:])
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
new file mode 100644
index 0000000000..d6bea7d4ac
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
@@ -0,0 +1,160 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"io"
+	"io/ioutil"
+	"strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+	Name, Comment, Email string
+}
+
+func hasInvalidCharacters(s string) bool {
+	for _, c := range s {
+		switch c {
+		case '(', ')', '<', '>', 0:
+			return true
+		}
+	}
+	return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'.
+func NewUserId(name, comment, email string) *UserId {
+	// RFC 4880 doesn't deal with the structure of userid strings; the
+	// name, comment and email form is just a convention. However, there's
+	// no convention about escaping the metacharacters and GPG just refuses
+	// to create user ids where, say, the name contains a '('. We mirror
+	// this behaviour.
+
+	if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+		return nil
+	}
+
+	uid := new(UserId)
+	uid.Name, uid.Comment, uid.Email = name, comment, email
+	uid.Id = name
+	if len(comment) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "("
+		uid.Id += comment
+		uid.Id += ")"
+	}
+	if len(email) > 0 {
+		if len(uid.Id) > 0 {
+			uid.Id += " "
+		}
+		uid.Id += "<"
+		uid.Id += email
+		uid.Id += ">"
+	}
+	return uid
+}
+
+func (uid *UserId) parse(r io.Reader) (err error) {
+	// RFC 4880, section 5.11
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return
+	}
+	uid.Id = string(b)
+	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+	return
+}
+
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) error {
+	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+	if err != nil {
+		return err
+	}
+	_, err = w.Write([]byte(uid.Id))
+	return err
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
+func parseUserId(id string) (name, comment, email string) {
+	var n, c, e struct {
+		start, end int
+	}
+	var state int
+
+	for offset, rune := range id {
+		switch state {
+		case 0:
+			// Entering name
+			n.start = offset
+			state = 1
+			fallthrough
+		case 1:
+			// In name
+			if rune == '(' {
+				state = 2
+				n.end = offset
+			} else if rune == '<' {
+				state = 5
+				n.end = offset
+			}
+		case 2:
+			// Entering comment
+			c.start = offset
+			state = 3
+			fallthrough
+		case 3:
+			// In comment
+			if rune == ')' {
+				state = 4
+				c.end = offset
+			}
+		case 4:
+			// Between comment and email
+			if rune == '<' {
+				state = 5
+			}
+		case 5:
+			// Entering email
+			e.start = offset
+			state = 6
+			fallthrough
+		case 6:
+			// In email
+			if rune == '>' {
+				state = 7
+				e.end = offset
+			}
+		default:
+			// After email
+		}
+	}
+	switch state {
+	case 1:
+		// ended in the name
+		n.end = len(id)
+	case 3:
+		// ended in comment
+		c.end = len(id)
+	case 6:
+		// ended in email
+		e.end = len(id)
+	}
+
+	name = strings.TrimSpace(id[n.start:n.end])
+	comment = strings.TrimSpace(id[c.start:c.end])
+	email = strings.TrimSpace(id[e.start:e.end])
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
new file mode 100644
index 0000000000..a649ebbab3
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
@@ -0,0 +1,508 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package openpgp implements high level operations on OpenPGP messages.
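+//
+// The primary entry points are ReadMessage, which decrypts and/or verifies an
+// OpenPGP message, and CheckDetachedSignature, which verifies a detached
+// signature over an independent data stream.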
+package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. + UnverifiedSignatures []*packet.Signature // all other unverified signature packets. + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. 
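+//
+// A minimal decrypt-and-verify sketch (keyring and msg are illustrative names;
+// the body must be read to EOF before any signature result is valid):
+//
+//	md, err := ReadMessage(msg, keyring, nil, nil)
+//	if err != nil {
+//		// handle parsing/decryption failure
+//	}
+//	plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
+//	if err != nil {
+//		// the data could not be read (or an MDC check failed); do not trust it
+//	}
+//	if md.IsSigned && md.SignatureError != nil {
+//		// the signature is invalid; do not trust plaintext
+//	}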
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted + var edp packet.EncryptedDataPacket + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted: + edp = p.(packet.EncryptedDataPacket) + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring, config) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. 
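+	// Each pass through the loop below either succeeds in decrypting the
+	// session key (and breaks out of FindKey), fails with a hard error, or
+	// falls through to ask the prompt for another key or passphrase.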
+FindKey:
+	for {
+		// See if any of the keys already have a private key available
+		candidates = candidates[:0]
+		candidateFingerprints := make(map[string]bool)
+
+		for _, pk := range pubKeys {
+			if pk.key.PrivateKey == nil {
+				continue
+			}
+			if !pk.key.PrivateKey.Encrypted {
+				if len(pk.encryptedKey.Key) == 0 {
+					errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
+					if errDec != nil {
+						continue
+					}
+				}
+				// Try to decrypt the symmetrically encrypted data
+				decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
+				if err != nil && err != errors.ErrKeyIncorrect {
+					return nil, err
+				}
+				if decrypted != nil {
+					md.DecryptedWith = pk.key
+					break FindKey
+				}
+			} else {
+				fpr := string(pk.key.PublicKey.Fingerprint[:])
+				if v := candidateFingerprints[fpr]; v {
+					continue
+				}
+				candidates = append(candidates, pk.key)
+				candidateFingerprints[fpr] = true
+			}
+		}
+
+		if len(candidates) == 0 && len(symKeys) == 0 {
+			return nil, errors.ErrKeyIncorrect
+		}
+
+		if prompt == nil {
+			return nil, errors.ErrKeyIncorrect
+		}
+
+		passphrase, err := prompt(candidates, len(symKeys) != 0)
+		if err != nil {
+			return nil, err
+		}
+
+		// Try the symmetric passphrase first
+		if len(symKeys) != 0 && passphrase != nil {
+			for _, s := range symKeys {
+				key, cipherFunc, err := s.Decrypt(passphrase)
+				// On a wrong passphrase, session key decryption is very likely to
+				// result in an invalid cipherFunc: only in under 5% of cases will
+				// we proceed to decrypt the data
+				if err == nil {
+					decrypted, err = edp.Decrypt(cipherFunc, key)
+					// TODO: ErrKeyIncorrect is no longer thrown on SEIP decryption,
+					// but it might still be relevant for when we implement AEAD decryption (otherwise, remove?)
+					if err != nil && err != errors.ErrKeyIncorrect {
+						return nil, err
+					}
+					if decrypted != nil {
+						break FindKey
+					}
+				}
+			}
+		}
+	}
+
+	md.decrypted = decrypted
+	if err := packets.Push(decrypted); err != nil {
+		return nil, err
+	}
+	mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config)
+	if sensitiveParsingErr != nil {
+		return nil, errors.StructuralError("parsing error")
+	}
+	return mdFinal, nil
+}
+
+// readSignedMessage reads a possibly signed message. If mdin is non-nil then
+// that structure is updated and returned. Otherwise a fresh MessageDetails is
+// used.
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash + var prevLast bool +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if prevLast { + return nil, errors.UnsupportedError("nested signature packets") + } + + if p.IsLast { + prevLast = true + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md.SignatureError = err + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.IsSigned && md.SignatureError == nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if hashId == crypto.MD5 { + return nil, nil, errors.UnsupportedError("insecure hash algorithm: MD5") + } + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails + config *packet.Config +} + +func (scr *signatureCheckReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) + + // Hash only if required + if scr.md.SignedBy != nil { + scr.wrappedHash.Write(buf[:n]) + } + + if sensitiveParsingError == io.EOF { + var p packet.Packet + var readError error + var sig *packet.Signature + + p, readError = scr.packets.Next() + for readError == nil { + var ok bool + if sig, ok = p.(*packet.Signature); ok { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.Metadata = scr.md.LiteralData + } + + // If signature KeyID matches + if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { + scr.md.Signature = sig + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { + scr.md.SignatureError = errors.ErrSignatureExpired + } + } else { + scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) + } + } + + p, readError = scr.packets.Next() + } + + if scr.md.SignedBy != nil && scr.md.Signature == nil { + if scr.md.UnverifiedSignatures == nil { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") + } else { + scr.md.SignatureError = errors.StructuralError("No matching signature found") + } + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + var expectedHashes []crypto.Hash + return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) +} + +// CheckDetachedSignatureAndHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions were used. 
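+//
+// For example, to require a SHA-256 signature (sketch; keyring, message and
+// sig are illustrative values):
+//
+//	signer, err := CheckDetachedSignatureAndHash(
+//		keyring, message, sig, []crypto.Hash{crypto.SHA256}, nil)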
+func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + expectedHashesLen := len(expectedHashes) + packets := packet.NewReader(signature) + var sig *packet.Signature + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + var ok bool + sig, ok = p.(*packet.Signature) + if !ok { + return nil, errors.StructuralError("non signature packet found") + } + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + + for i, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + break + } + if i+1 == expectedHashesLen { + return nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + } + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + err = key.PublicKey.VerifySignature(h, sig) + if err == nil { + now := config.Now() + if sig.SigExpired(now) { + return key.Entity, errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) { + return key.Entity, errors.ErrKeyExpired + } + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. 
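+//
+// A sketch (sigR holds the armored signature and msgR the signed data; both
+// names are illustrative):
+//
+//	signer, err := CheckArmoredDetachedSignature(keyring, msgR, sigR, nil)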
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body, config) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go new file mode 100644 index 0000000000..8caed36e3e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -0,0 +1,173 @@ +package openpgp + +const testKey1KeyId = 0xA34D7E18C20C31BB +const testKey3KeyId = 0x338934250CCC0360 +const testKeyP256KeyId = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt +const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" + +const testKeys1And2Hex = 
"988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" + +const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const unverifiedSignatureEncryptedMessageHex = 
"c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" + +const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" + +const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn 
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD +24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = 
`9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` + +const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx 
+YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1
+b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi
+UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M
+pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM
+AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz
+786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd
+EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB
+=RZia
+-----END PGP PUBLIC KEY BLOCK-----
+`
+
+const signedMessageV3 = `-----BEGIN PGP MESSAGE-----
+Comment: GPGTools - https://gpgtools.org
+
+owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP
+q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka
+uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka
+DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d
+iT57d/OhWwA=
+=hG7R
+-----END PGP MESSAGE-----
+`
+
+// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/
+const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
+fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
+Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
+X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
+CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
+M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
+MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
+AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
+GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
+DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
+TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
+=IiS2
+-----END PGP PRIVATE KEY BLOCK-----`
+
+// Generated with the above private key
+const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE-----
+Version: OpenPGP.js v4.10.7
+Comment: https://openpgpjs.org
+
+xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf
+bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G
+y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv
+UQdl5MlBka1QSNbMq2Bz7XwNPg4=
+=6lbM
+-----END PGP MESSAGE-----`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
new file mode 100644
index 0000000000..14f58548bd
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
@@ -0,0 +1,367 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 Section 3.7.1.
+package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+	// S2KMode is the mode of the s2k function.
+	// It can be 0 (simple), 1 (salted), 3 (iterated);
+	// 2 is reserved and 100-110 are private/experimental.
+	S2KMode uint8
+	// Hash is the default hash function to be used. If
+	// nil, SHA256 is used.
+	Hash crypto.Hash
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. See RFC 4880 Section 3.7.1.3.
+	S2KCount int
+}
+
+// Params contains all the parameters of the s2k packet
+type Params struct {
+	// mode is the mode of the s2k function.
+	// It can be 0 (simple), 1 (salted), 3 (iterated);
+	// 2 is reserved and 100-110 are private/experimental.
+	mode uint8
+	// hashId is the ID of the hash function used in any of the modes
+	hashId byte
+	// salt is a byte array to use as a salt in hashing process
+	salt []byte
+	// countByte is used to determine how many rounds of hashing are to
+	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
+	countByte byte
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		return crypto.SHA256
+	}
+
+	return c.Hash
+}
+
+// EncodedCount returns the encoded iteration count.
+func (c *Config) EncodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 224 // The common case. Corresponding to 16777216
+	}
+
+	i := c.S2KCount
+
+	switch {
+	case i < 65536:
+		i = 65536
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 65536 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (EncodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 65536 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 96; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
+	done := 0
+	var digest []byte
+
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		h.Write(salt)
+		h.Write(in)
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Iterated writes to out the result of computing the Iterated and Salted S2K
+// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
+// salt and iteration count.
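+//
+// As an illustrative sketch only (added for clarity, not upstream
+// documentation): deriving a 32-byte key from a passphrase with SHA-256, a
+// placeholder 8-byte salt, and the default encoded count 224 (16777216
+// iterations):
+//
+//	key := make([]byte, 32)
+//	salt := []byte{0, 1, 2, 3, 4, 5, 6, 7} // example value only
+//	Iterated(key, sha256.New(), []byte("passphrase"), salt, decodeCount(224))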
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
+	combined := make([]byte, len(in)+len(salt))
+	copy(combined, salt)
+	copy(combined[len(salt):], in)
+
+	if count < len(combined) {
+		count = len(combined)
+	}
+
+	done := 0
+	var digest []byte
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		written := 0
+		for written < count {
+			if written+len(combined) > count {
+				todo := count - written
+				h.Write(combined[:todo])
+				written = count
+			} else {
+				h.Write(combined)
+				written += len(combined)
+			}
+		}
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Generate generates valid parameters from the given configuration.
+// It will enforce the iterated and salted s2k method (mode 3).
+func Generate(rand io.Reader, c *Config) (*Params, error) {
+	hashId, ok := HashToHashId(c.Hash)
+	if !ok {
+		return nil, errors.UnsupportedError("no such hash")
+	}
+
+	params := &Params{
+		mode:      3, // Enforce iterated + salted method
+		hashId:    hashId,
+		salt:      make([]byte, 8),
+		countByte: c.EncodedCount(),
+	}
+
+	if _, err := io.ReadFull(rand, params.salt); err != nil {
+		return nil, err
+	}
+
+	return params, nil
+}
+
+// Parse reads a binary specification for a string-to-key transformation from r
+// and returns a function which performs that transform. If the S2K is a special
+// GNU extension that indicates that the private key is missing, then the error
+// returned is errors.ErrDummyPrivateKey.
+func Parse(r io.Reader) (f func(out, in []byte), err error) {
+	params, err := ParseIntoParams(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return params.Function()
+}
+
+// ParseIntoParams reads a binary specification for a string-to-key
+// transformation from r and returns a struct describing the s2k parameters.
+func ParseIntoParams(r io.Reader) (params *Params, err error) {
+	var buf [9]byte
+
+	_, err = io.ReadFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+
+	params = &Params{
+		mode:   buf[0],
+		hashId: buf[1],
+	}
+
+	switch params.mode {
+	case 0:
+		return params, nil
+	case 1:
+		_, err = io.ReadFull(r, buf[:8])
+		if err != nil {
+			return nil, err
+		}
+
+		params.salt = buf[:8]
+		return params, nil
+	case 3:
+		_, err = io.ReadFull(r, buf[:9])
+		if err != nil {
+			return nil, err
+		}
+
+		params.salt = buf[:8]
+		params.countByte = buf[8]
+		return params, nil
+	case 101:
+		// This is a GNU extension. See
+		// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
+		if _, err = io.ReadFull(r, buf[:4]); err != nil {
+			return nil, err
+		}
+		if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 {
+			return params, nil
+		}
+		return nil, errors.UnsupportedError("GNU S2K extension")
+	}
+
+	return nil, errors.UnsupportedError("S2K function")
+}
+
+func (params *Params) Dummy() bool {
+	return params != nil && params.mode == 101
+}
+
+func (params *Params) Function() (f func(out, in []byte), err error) {
+	if params.Dummy() {
+		return nil, errors.ErrDummyPrivateKey("dummy key found")
+	}
+	hashObj, ok := HashIdToHash(params.hashId)
+	if !ok {
+		return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
+	}
+	if !hashObj.Available() {
+		return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
+	}
+
+	switch params.mode {
+	case 0:
+		f := func(out, in []byte) {
+			Simple(out, hashObj.New(), in)
+		}
+
+		return f, nil
+	case 1:
+		f := func(out, in []byte) {
+			Salted(out, hashObj.New(), in, params.salt)
+		}
+
+		return f, nil
+	case 3:
+		f := func(out, in []byte) {
+			Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte))
+		}
+
+		return f, nil
+	}
+
+	return nil, errors.UnsupportedError("S2K function")
+}
+
+func (params *Params) Serialize(w io.Writer) (err error) {
+	if _, err = w.Write([]byte{params.mode}); err != nil {
+		return
+	}
+	if _, err = w.Write([]byte{params.hashId}); err != nil {
+		return
+	}
+	if params.Dummy() {
+		_, err = w.Write(append([]byte("GNU"), 1))
+		return
+	}
+	if params.mode > 0 {
+		if _, err = w.Write(params.salt); err != nil {
+			return
+		}
+		if params.mode == 3 {
+			_, err = w.Write([]byte{params.countByte})
+		}
+	}
+	return
+}
+
+// Serialize salts and stretches the given passphrase and writes the
+// resulting key into key. It also serializes an S2K descriptor to
+// w. The key stretching can be configured with c, which may be
+// nil. In that case, sensible defaults will be used.
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+	params, err := Generate(rand, c)
+	if err != nil {
+		return err
+	}
+	err = params.Serialize(w)
+	if err != nil {
+		return err
+	}
+
+	f, err := params.Function()
+	if err != nil {
+		return err
+	}
+	f(key, passphrase)
+	return nil
+}
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+	if hash, ok := algorithm.HashById[id]; ok {
+		return hash.HashFunc(), true
+	}
+	return 0, false
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id.
+func HashIdToString(id byte) (name string, ok bool) {
+	if hash, ok := algorithm.HashById[id]; ok {
+		return hash.String(), true
+	}
+	return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+	for id, hash := range algorithm.HashById {
+		if hash.HashFunc() == h {
+			return id, true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
new file mode 100644
index 0000000000..f0d7fe10de
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
@@ -0,0 +1,568 @@
+// Copyright 2011 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return errors.InvalidArgumentError("no valid signing keys") + } + if signingKey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signingKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sigLifetimeSecs := config.SigLifetime() + sig.SigLifetimeSecs = &sigLifetimeSecs + sig.IssuerKeyId = &signingKey.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + if _, err = io.Copy(wrappedHash, message); err != nil { + return err + } + + err = sig.Sign(h, signingKey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. 
This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + + var w io.WriteCloser + if config.AEAD() != nil { + w, err = packet.SerializeAEADEncrypted(ciphertext, key, config.Cipher(), config.AEAD().Mode(), config) + if err != nil { + return + } + } else { + w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + } + + literalData := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literalData, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// EncryptText encrypts a message to a number of recipients and, optionally, +// signs it. Optional information is contained in 'hints', also encrypted, that +// aids the recipients in processing the message. The resulting WriteCloser +// must be closed after the contents of the file have been written. If config +// is nil, sensible defaults will be used. The signing is done in text mode. +func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config) +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
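+//
+// A minimal illustrative sketch only (not part of the upstream docs; "out",
+// "recipients" and "plaintext" are assumed placeholders):
+//
+//	w, err := Encrypt(out, recipients, nil, nil, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	_, err = w.Write(plaintext)
+//	// The ciphertext is complete only after the WriteCloser is closed.
+//	err = w.Close()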
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config) +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. 
(Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: sigType, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. + w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + h, wrappedHash, err := hashForSignature(hash, sigType) + if err != nil { + return nil, err + } + metadata := &packet.LiteralData{ + Format: 't', + FileName: hints.FileName, + Time: epochSeconds, + } + if hints.IsBinary { + metadata.Format = 'b' + } + return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil + } + return literalData, nil +} + +// encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + candidateAeadModes := []uint8{ + uint8(packet.AEADModeEAX), + uint8(packet.AEADModeOCB), + uint8(packet.AEADModeExperimentalGCM), + } + candidateCompression := []uint8{ + uint8(packet.CompressionNone), + uint8(packet.CompressionZIP), + uint8(packet.CompressionZLIB), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[0:1] + defaultHashes := candidateHashes[0:1] + defaultAeadModes := candidateAeadModes[0:1] + defaultCompression := candidateCompression[0:1] + + encryptKeys := make([]Key, len(to)) + // AEAD is used only if every key supports it. 
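+	// (The recipient loop below clears this flag as soon as one recipient's
+	// self-signature does not advertise AEAD support.)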
+ aeadSupported := true + + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].PrimaryIdentity().SelfSignature + if sig.AEAD == false { + aeadSupported = false + } + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + preferredAeadModes := sig.PreferredAEAD + if len(preferredAeadModes) == 0 { + preferredAeadModes = defaultAeadModes + } + preferredCompression := sig.PreferredCompression + if len(preferredCompression) == 0 { + preferredCompression = defaultCompression + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + candidateAeadModes = intersectPreferences(candidateAeadModes, preferredAeadModes) + candidateCompression = intersectPreferences(candidateCompression, preferredCompression) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 || len(candidateAeadModes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + mode := packet.AEADMode(candidateAeadModes[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + var payload io.WriteCloser + if aeadSupported { + payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) + if err != nil { + return + } + } else { + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, symKey, config) + if err != nil { + return + } + } + payload, err = handleCompression(payload, candidateCompression, config) + if err != nil { + return nil, err + } + + return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
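+	// (Listed in rough preference order; SHA1 and RIPEMD160 appear to be
+	// retained only for interoperability with older keys.)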
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[0:1] + preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + if len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") + } + + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + wrappedHash hash.Hash + h hash.Hash + signer *packet.PrivateKey + sigType packet.SignatureType + config *packet.Config + metadata *packet.LiteralData // V5 signatures protect document metadata +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.wrappedHash.Write(data) + switch s.sigType { + case packet.SigTypeBinary: + return s.literalData.Write(data) + case packet.SigTypeText: + flag := 0 + return writeCanonical(s.literalData, data, &flag) + } + return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + Version: s.signer.Version, + SigType: s.sigType, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + Metadata: s.metadata, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. 
+type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { + data = compressed + confAlgo := config.Compression() + if confAlgo == packet.CompressionNone { + return + } + finalAlgo := packet.CompressionNone + // if compression specified by config available we will use it + for _, c := range candidateCompression { + if uint8(confAlgo) == c { + finalAlgo = confAlgo + break + } + } + + if finalAlgo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) + if err != nil { + return + } + } + return data, nil +} diff --git a/vendor/github.com/aokoli/goutils/.travis.yml b/vendor/github.com/aokoli/goutils/.travis.yml deleted file mode 100644 index 4025e01ec4..0000000000 --- a/vendor/github.com/aokoli/goutils/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - 1.8 - - tip - -script: - - go test -v - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/aokoli/goutils/CHANGELOG.md b/vendor/github.com/aokoli/goutils/CHANGELOG.md deleted file mode 100644 index d700ec47f2..0000000000 --- a/vendor/github.com/aokoli/goutils/CHANGELOG.md +++ /dev/null @@ -1,8 +0,0 @@ -# 1.0.1 (2017-05-31) - -## Fixed -- #21: Fix generation of alphanumeric strings (thanks @dbarranco) - -# 1.0.0 (2014-04-30) - -- Initial release. diff --git a/vendor/github.com/aokoli/goutils/README.md b/vendor/github.com/aokoli/goutils/README.md deleted file mode 100644 index 163ffe72a8..0000000000 --- a/vendor/github.com/aokoli/goutils/README.md +++ /dev/null @@ -1,70 +0,0 @@ -GoUtils -=========== -[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) -[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) - - -GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some -string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: -* WordUtils -* RandomStringUtils -* StringUtils (partial implementation) - -## Installation -If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: - - go get github.com/Masterminds/goutils - -If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. 
- - -## Documentation -GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) - - -## Usage -The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 1: A goutils function which returns no errors - fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" - - } -Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). - - package main - - import ( - "fmt" - "github.com/Masterminds/goutils" - ) - - func main() { - - // EXAMPLE 2: A goutils function which returns an error - rand1, err1 := goutils.Random (-1, 0, 0, true, true) - - if err1 != nil { - fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) - } else { - fmt.Println(rand1) - } - - } - -## License -GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. - -## Issue Reporting -Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues - -## Website -* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/aokoli/goutils/appveyor.yml b/vendor/github.com/aokoli/goutils/appveyor.yml deleted file mode 100644 index 657564a847..0000000000 --- a/vendor/github.com/aokoli/goutils/appveyor.yml +++ /dev/null @@ -1,21 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\goutils -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -build: off - -install: - - go version - - go env - -test_script: - - go test -v - -deploy: off diff --git a/vendor/github.com/aokoli/goutils/randomstringutils.go b/vendor/github.com/aokoli/goutils/randomstringutils.go deleted file mode 100644 index 1364e0cafd..0000000000 --- a/vendor/github.com/aokoli/goutils/randomstringutils.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "fmt" - "math" - "math/rand" - "regexp" - "time" - "unicode" -) - -// RANDOM provides the time-based seed used to generate random numbers -var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) - -/* -RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). 
- -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNonAlphaNumeric(count int) (string, error) { - return RandomAlphaNumericCustom(count, false, false) -} - -/* -RandomAscii creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAscii(count int) (string, error) { - return Random(count, 32, 127, false, false) -} - -/* -RandomNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomNumeric(count int) (string, error) { - return Random(count, 0, 0, false, true) -} - -/* -RandomAlphabetic creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphabetic(count int) (string, error) { - return Random(count, 0, 0, true, false) -} - -/* -RandomAlphaNumeric creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters. - -Parameter: - count - the length of random string to create - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func RandomAlphaNumeric(count int) (string, error) { - RandomString, err := Random(count, 0, 0, true, true) - if err != nil { - return "", fmt.Errorf("Error: %s", err) - } - match, err := regexp.MatchString("([0-9]+)", RandomString) - if err != nil { - panic(err) - } - - if !match { - //Get the position between 0 and the length of the string-1 to insert a random number - position := rand.Intn(count) - //Insert a random number between [0-9] in the position - RandomString = RandomString[:position] + string('0'+rand.Intn(10)) + RandomString[position+1:] - return RandomString, err - } - return RandomString, err - -} - -/* -RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. -Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. - -Parameters: - count - the length of random string to create - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
-*/ -func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { - return Random(count, 0, 0, letters, numbers) -} - -/* -Random creates a random string based on a variety of options, using default source of randomness. -This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but -instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode int) to start at - end - the position in set of chars (ASCII/Unicode int) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - -Returns: - string - the random string - error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) -*/ -func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { - return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) -} - -/* -RandomSeed creates a random string based on a variety of options, using supplied source of randomness. -If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, -unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. -If chars is not nil, characters stored in chars that are between start and end are chosen. -This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance -with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. - -Parameters: - count - the length of random string to create - start - the position in set of chars (ASCII/Unicode decimals) to start at - end - the position in set of chars (ASCII/Unicode decimals) to end before - letters - if true, generated string may include alphabetic characters - numbers - if true, generated string may include numeric characters - chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. - random - a source of randomness. 
- -Returns: - string - the random string - error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) -*/ -func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { - - if count == 0 { - return "", nil - } else if count < 0 { - err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") - return "", err - } - if chars != nil && len(chars) == 0 { - err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") - return "", err - } - - if start == 0 && end == 0 { - if chars != nil { - end = len(chars) - } else { - if !letters && !numbers { - end = math.MaxInt32 - } else { - end = 'z' + 1 - start = ' ' - } - } - } else { - if end <= start { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) - return "", err - } - - if chars != nil && end > len(chars) { - err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) - return "", err - } - } - - buffer := make([]rune, count) - gap := end - start - - // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 - // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 - - for count != 0 { - count-- - var ch rune - if chars == nil { - ch = rune(random.Intn(gap) + start) - } else { - ch = chars[random.Intn(gap)+start] - } - - if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { - if ch >= 56320 && ch <= 57343 { // low surrogate range - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = ch - count-- - // Insert high surrogate - buffer[count] = rune(55296 + random.Intn(128)) - } - } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) - if count == 0 { - count++ - } else { - // Insert low surrogate - buffer[count] = rune(56320 + random.Intn(128)) - count-- - // Insert high surrogate - buffer[count] = ch - } - } else if ch >= 56192 && ch <= 56319 { - // private high surrogate, skip it - count++ - } else { - // not one of the surrogates* - buffer[count] = ch - } - } else { - count++ - } - } - return string(buffer), nil -} diff --git a/vendor/github.com/aokoli/goutils/stringutils.go b/vendor/github.com/aokoli/goutils/stringutils.go deleted file mode 100644 index 5037c4516b..0000000000 --- a/vendor/github.com/aokoli/goutils/stringutils.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package goutils - -import ( - "bytes" - "fmt" - "strings" - "unicode" -) - -// Typically returned by functions where a searched item cannot be found -const INDEX_NOT_FOUND = -1 - -/* -Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..." 
- -Specifically, the algorithm is as follows: - - - If str is less than maxWidth characters long, return it. - - Else abbreviate it to (str[0:maxWidth - 3] + "..."). - - If maxWidth is less than 4, return an illegal argument error. - - In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func Abbreviate(str string, maxWidth int) (string, error) { - return AbbreviateFull(str, 0, maxWidth) -} - -/* -AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." -This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not -necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear -somewhere in the result. -In no case will it return a string of length greater than maxWidth. - -Parameters: - str - the string to check - offset - left edge of source string - maxWidth - maximum length of result string, must be at least 4 - -Returns: - string - abbreviated string - error - if the width is too small -*/ -func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { - if str == "" { - return "", nil - } - if maxWidth < 4 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") - return "", err - } - if len(str) <= maxWidth { - return str, nil - } - if offset > len(str) { - offset = len(str) - } - if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 - offset = len(str) - (maxWidth - 3) - } - abrevMarker := "..." - if offset <= 4 { - return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; - } - if maxWidth < 7 { - err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") - return "", err - } - if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 - abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) - return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); - } - return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); -} - -/* -DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). -It returns the string without whitespaces. - -Parameter: - str - the string to delete whitespace from, may be nil - -Returns: - the string without whitespaces -*/ -func DeleteWhiteSpace(str string) string { - if str == "" { - return str - } - sz := len(str) - var chs bytes.Buffer - count := 0 - for i := 0; i < sz; i++ { - ch := rune(str[i]) - if !unicode.IsSpace(ch) { - chs.WriteRune(ch) - count++ - } - } - if count == sz { - return str - } - return chs.String() -} - -/* -IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
- -Parameters: - str1 - the first string - str2 - the second string - -Returns: - the index where str1 and str2 begin to differ; -1 if they are equal -*/ -func IndexOfDifference(str1 string, str2 string) int { - if str1 == str2 { - return INDEX_NOT_FOUND - } - if IsEmpty(str1) || IsEmpty(str2) { - return 0 - } - var i int - for i = 0; i < len(str1) && i < len(str2); i++ { - if rune(str1[i]) != rune(str2[i]) { - break - } - } - if i < len(str2) || i < len(str1) { - return i - } - return INDEX_NOT_FOUND -} - -/* -IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: - - goutils.IsBlank("") = true - goutils.IsBlank(" ") = true - goutils.IsBlank("bob") = false - goutils.IsBlank(" bob ") = false - -Parameter: - str - the string to check - -Returns: - true - if the string is whitespace or empty ("") -*/ -func IsBlank(str string) bool { - strLen := len(str) - if str == "" || strLen == 0 { - return true - } - for i := 0; i < strLen; i++ { - if unicode.IsSpace(rune(str[i])) == false { - return false - } - } - return true -} - -/* -IndexOf returns the index of the first instance of sub in str, with the search beginning from the -index start point specified. -1 is returned if sub is not present in str. - -An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. -A start position greater than the string length returns -1. - -Parameters: - str - the string to check - sub - the substring to find - start - the start position; negative treated as zero - -Returns: - the first index where the sub string was found (always >= start) -*/ -func IndexOf(str string, sub string, start int) int { - - if start < 0 { - start = 0 - } - - if len(str) < start { - return INDEX_NOT_FOUND - } - - if IsEmpty(str) || IsEmpty(sub) { - return INDEX_NOT_FOUND - } - - partialIndex := strings.Index(str[start:len(str)], sub) - if partialIndex == -1 { - return INDEX_NOT_FOUND - } - return partialIndex + start -} - -// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. -func IsEmpty(str string) bool { - return len(str) == 0 -} diff --git a/vendor/github.com/aokoli/goutils/wordutils.go b/vendor/github.com/aokoli/goutils/wordutils.go deleted file mode 100644 index e92dd39900..0000000000 --- a/vendor/github.com/aokoli/goutils/wordutils.go +++ /dev/null @@ -1,356 +0,0 @@ -/* -Copyright 2014 Alexander Okoli - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* -Package goutils provides utility functions to manipulate strings in various ways. -The code snippets below show examples of how to use goutils. Some functions return -errors while others do not, so usage would vary as a result. 
-
-Example:
-
-	package main
-
-	import (
-		"fmt"
-		"github.com/aokoli/goutils"
-	)
-
-	func main() {
-
-		// EXAMPLE 1: A goutils function which returns no errors
-		fmt.Println(goutils.Initials("John Doe Foo")) // Prints out "JDF"
-
-		// EXAMPLE 2: A goutils function which returns an error
-		rand1, err1 := goutils.Random(-1, 0, 0, true, true)
-
-		if err1 != nil {
-			fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
-		} else {
-			fmt.Println(rand1)
-		}
-	}
-*/
-package goutils
-
-import (
-	"bytes"
-	"strings"
-	"unicode"
-)
-
-// VERSION indicates the current version of goutils
-const VERSION = "1.0.0"
-
-/*
-Wrap wraps a single line of text, identifying words by ' '.
-New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped.
-Leading spaces on a new line are stripped. Trailing spaces are not stripped.
-
-Parameters:
-	str - the string to be word wrapped
-	wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
-
-Returns:
-	a line with newlines inserted
-*/
-func Wrap(str string, wrapLength int) string {
-	return WrapCustom(str, wrapLength, "", false)
-}
-
-/*
-WrapCustom wraps a single line of text, identifying words by ' '.
-Leading spaces on a new line are stripped. Trailing spaces are not stripped.
-
-Parameters:
-	str - the string to be word wrapped
-	wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
-	newLineStr - the string to insert for a new line, "" uses '\n'
-	wrapLongWords - true if long words (such as URLs) should be wrapped
-
-Returns:
-	a line with newlines inserted
-*/
-func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string {
-
-	if str == "" {
-		return ""
-	}
-	if newLineStr == "" {
-		newLineStr = "\n" // TODO Assumes "\n" is separator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons
-	}
-	if wrapLength < 1 {
-		wrapLength = 1
-	}
-
-	inputLineLength := len(str)
-	offset := 0
-
-	var wrappedLine bytes.Buffer
-
-	for inputLineLength-offset > wrapLength {
-
-		if rune(str[offset]) == ' ' {
-			offset++
-			continue
-		}
-
-		end := wrapLength + offset + 1
-		spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset
-
-		if spaceToWrapAt >= offset {
-			// normal word (not longer than wrapLength)
-			wrappedLine.WriteString(str[offset:spaceToWrapAt])
-			wrappedLine.WriteString(newLineStr)
-			offset = spaceToWrapAt + 1
-
-		} else {
-			// long word or URL
-			if wrapLongWords {
-				end := wrapLength + offset
-				// long words are wrapped one line at a time
-				wrappedLine.WriteString(str[offset:end])
-				wrappedLine.WriteString(newLineStr)
-				offset += wrapLength
-			} else {
-				// long words aren't wrapped, just extended beyond limit
-				end := wrapLength + offset
-				spaceToWrapAt = strings.IndexRune(str[end:len(str)], ' ') + end
-				if spaceToWrapAt >= 0 {
-					wrappedLine.WriteString(str[offset:spaceToWrapAt])
-					wrappedLine.WriteString(newLineStr)
-					offset = spaceToWrapAt + 1
-				} else {
-					wrappedLine.WriteString(str[offset:len(str)])
-					offset = inputLineLength
-				}
-			}
-		}
-	}
-
-	wrappedLine.WriteString(str[offset:len(str)])
-
-	return wrappedLine.String()
-
-}
-
-/*
-Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
-To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
-The delimiters represent a set of characters understood to separate words. The first string character
-and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
-Capitalization uses the Unicode title case, normally equivalent to upper case.
-
-Parameters:
-	str - the string to capitalize
-	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
-
-Returns:
-	capitalized string
-*/
-func Capitalize(str string, delimiters ...rune) string {
-
-	var delimLen int
-
-	if delimiters == nil {
-		delimLen = -1
-	} else {
-		delimLen = len(delimiters)
-	}
-
-	if str == "" || delimLen == 0 {
-		return str
-	}
-
-	buffer := []rune(str)
-	capitalizeNext := true
-	for i := 0; i < len(buffer); i++ {
-		ch := buffer[i]
-		if isDelimiter(ch, delimiters...) {
-			capitalizeNext = true
-		} else if capitalizeNext {
-			buffer[i] = unicode.ToTitle(ch)
-			capitalizeNext = false
-		}
-	}
-	return string(buffer)
-
-}
-
-/*
-CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
-titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
-to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
-Capitalization uses the Unicode title case, normally equivalent to upper case.
-
-Parameters:
-	str - the string to capitalize fully
-	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
-
-Returns:
-	capitalized string
-*/
-func CapitalizeFully(str string, delimiters ...rune) string {
-
-	var delimLen int
-
-	if delimiters == nil {
-		delimLen = -1
-	} else {
-		delimLen = len(delimiters)
-	}
-
-	if str == "" || delimLen == 0 {
-		return str
-	}
-	str = strings.ToLower(str)
-	return Capitalize(str, delimiters...)
-}
-
-/*
-Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
-The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
-character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
-
-Parameters:
-	str - the string to uncapitalize fully
-	delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimiter
-
-Returns:
-	uncapitalized string
-*/
-func Uncapitalize(str string, delimiters ...rune) string {
-
-	var delimLen int
-
-	if delimiters == nil {
-		delimLen = -1
-	} else {
-		delimLen = len(delimiters)
-	}
-
-	if str == "" || delimLen == 0 {
-		return str
-	}
-
-	buffer := []rune(str)
-	uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
-	for i := 0; i < len(buffer); i++ {
-		ch := buffer[i]
-		if isDelimiter(ch, delimiters...) {
-			uncapitalizeNext = true
-		} else if uncapitalizeNext {
-			buffer[i] = unicode.ToLower(ch)
-			uncapitalizeNext = false
-		}
-	}
-	return string(buffer)
-}
-
-/*
-SwapCase swaps the case of a string using a word based algorithm.
-
-Conversion algorithm:
-
-	Upper case character converts to Lower case
-	Title case character converts to Lower case
-	Lower case character after Whitespace or at start converts to Title case
-	Other Lower case character converts to Upper case
-	Whitespace is defined by unicode.IsSpace(char).
-
-Parameters:
-	str - the string to swap case
-
-Returns:
-	the changed string
-*/
-func SwapCase(str string) string {
-	if str == "" {
-		return str
-	}
-	buffer := []rune(str)
-
-	whitespace := true
-
-	for i := 0; i < len(buffer); i++ {
-		ch := buffer[i]
-		if unicode.IsUpper(ch) {
-			buffer[i] = unicode.ToLower(ch)
-			whitespace = false
-		} else if unicode.IsTitle(ch) {
-			buffer[i] = unicode.ToLower(ch)
-			whitespace = false
-		} else if unicode.IsLower(ch) {
-			if whitespace {
-				buffer[i] = unicode.ToTitle(ch)
-				whitespace = false
-			} else {
-				buffer[i] = unicode.ToUpper(ch)
-			}
-		} else {
-			whitespace = unicode.IsSpace(ch)
-		}
-	}
-	return string(buffer)
-}
-
-/*
-Initials extracts the initial letters from each word in the string. The first letter of the string and all first
-letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
-parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpace(char). An empty delimiter array returns an empty string.
-
-Parameters:
-	str - the string to get initials from
-	delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimiter
-Returns:
-	string of initial letters
-*/
-func Initials(str string, delimiters ...rune) string {
-	if str == "" {
-		return str
-	}
-	if delimiters != nil && len(delimiters) == 0 {
-		return ""
-	}
-	strLen := len(str)
-	var buf bytes.Buffer
-	lastWasGap := true
-	for i := 0; i < strLen; i++ {
-		ch := rune(str[i])
-
-		if isDelimiter(ch, delimiters...) {
-			lastWasGap = true
-		} else if lastWasGap {
-			buf.WriteRune(ch)
-			lastWasGap = false
-		}
-	}
-	return buf.String()
-}
-
-// private function (lower case func name)
-func isDelimiter(ch rune, delimiters ...rune) bool {
-	if delimiters == nil {
-		return unicode.IsSpace(ch)
-	}
-	for _, delimiter := range delimiters {
-		if ch == delimiter {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/blakesmith/ar/.gitignore b/vendor/github.com/blakesmith/ar/.gitignore
new file mode 100644
index 0000000000..99cf5ff7dc
--- /dev/null
+++ b/vendor/github.com/blakesmith/ar/.gitignore
@@ -0,0 +1 @@
+*.*~
diff --git a/vendor/github.com/blakesmith/ar/COPYING b/vendor/github.com/blakesmith/ar/COPYING
new file mode 100644
index 0000000000..c99e81289f
--- /dev/null
+++ b/vendor/github.com/blakesmith/ar/COPYING
@@ -0,0 +1,20 @@
+Copyright (c) 2013 Blake Smith
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
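The goutils helpers deleted above are documented at length in their own comments, but seeing a few of them together helps anyone still depending on the removed package. A minimal sketch, assuming the upstream github.com/aokoli/goutils module is fetched directly instead of vendored (output comments follow the documented behavior):

	package main

	import (
		"fmt"

		"github.com/aokoli/goutils"
	)

	func main() {
		// Wrap at column 20; long words such as URLs are left unwrapped.
		fmt.Println(goutils.Wrap("Now is the time for all good men", 20))

		// Capitalize the first letter of each whitespace-separated word.
		fmt.Println(goutils.Capitalize("now is the time")) // Now Is The Time

		// Initials of each whitespace-separated word.
		fmt.Println(goutils.Initials("John Doe Foo")) // JDF

		// Random rejects a negative length with an error.
		if _, err := goutils.Random(-1, 0, 0, true, true); err != nil {
			fmt.Println(err)
		}
	}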
+
diff --git a/vendor/github.com/blakesmith/ar/README.md b/vendor/github.com/blakesmith/ar/README.md
new file mode 100644
index 0000000000..ff75d7ff0f
--- /dev/null
+++ b/vendor/github.com/blakesmith/ar/README.md
@@ -0,0 +1,9 @@
+# Golang ar (archive) file reader
+
+This is a simple library for reading and writing [ar](http://en.wikipedia.org/wiki/Ar_(Unix)) files in common format. It is influenced heavily in style and interface from the golang [tar](http://golang.org/pkg/archive/tar/) package.
+
+## Author
+
+Written by Blake Smith
+
+Licensed under the MIT license.
\ No newline at end of file
diff --git a/vendor/github.com/blakesmith/ar/common.go b/vendor/github.com/blakesmith/ar/common.go
new file mode 100644
index 0000000000..335bade4e2
--- /dev/null
+++ b/vendor/github.com/blakesmith/ar/common.go
@@ -0,0 +1,48 @@
+/*
+Copyright (c) 2013 Blake Smith
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+package ar
+
+import (
+	"time"
+)
+
+const (
+	HEADER_BYTE_SIZE = 60
+	GLOBAL_HEADER    = "!<arch>\n"
+)
+
+type Header struct {
+	Name    string
+	ModTime time.Time
+	Uid     int
+	Gid     int
+	Mode    int64
+	Size    int64
+}
+
+type slicer []byte
+
+func (sp *slicer) next(n int) (b []byte) {
+	s := *sp
+	b, *sp = s[0:n], s[n:]
+	return
+}
diff --git a/vendor/github.com/blakesmith/ar/reader.go b/vendor/github.com/blakesmith/ar/reader.go
new file mode 100644
index 0000000000..4b82493982
--- /dev/null
+++ b/vendor/github.com/blakesmith/ar/reader.go
@@ -0,0 +1,155 @@
+/*
+Copyright (c) 2013 Blake Smith
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ +package ar + +import ( + "io" + "io/ioutil" + "os" + "strconv" + "time" +) + +// Provides read access to an ar archive. +// Call next to skip files +// +// Example: +// reader := NewReader(f) +// var buf bytes.Buffer +// for { +// _, err := reader.Next() +// if err == io.EOF { +// break +// } +// if err != nil { +// t.Errorf(err.Error()) +// } +// io.Copy(&buf, reader) +// } + +type Reader struct { + r io.Reader + nb int64 + pad int64 +} + +// Copies read data to r. Strips the global ar header. +func NewReader(r io.Reader) *Reader { + io.CopyN(ioutil.Discard, r, 8) // Discard global header + + return &Reader{r: r} +} + +func (rd *Reader) string(b []byte) string { + i := len(b)-1 + for i > 0 && b[i] == 32 { + i-- + } + + return string(b[0:i+1]) +} + +func (rd *Reader) numeric(b []byte) int64 { + i := len(b)-1 + for i > 0 && b[i] == 32 { + i-- + } + + n, _ := strconv.ParseInt(string(b[0:i+1]), 10, 64) + + return n +} + +func (rd *Reader) octal(b []byte) int64 { + i := len(b)-1 + for i > 0 && b[i] == 32 { + i-- + } + + n, _ := strconv.ParseInt(string(b[3:i+1]), 8, 64) + + return n +} + +func (rd *Reader) skipUnread() error { + skip := rd.nb + rd.pad + rd.nb, rd.pad = 0, 0 + if seeker, ok := rd.r.(io.Seeker); ok { + _, err := seeker.Seek(skip, os.SEEK_CUR) + return err + } + + _, err := io.CopyN(ioutil.Discard, rd.r, skip) + return err +} + +func (rd *Reader) readHeader() (*Header, error) { + headerBuf := make([]byte, HEADER_BYTE_SIZE) + if _, err := io.ReadFull(rd.r, headerBuf); err != nil { + return nil, err + } + + header := new(Header) + s := slicer(headerBuf) + + header.Name = rd.string(s.next(16)) + header.ModTime = time.Unix(rd.numeric(s.next(12)), 0) + header.Uid = int(rd.numeric(s.next(6))) + header.Gid = int(rd.numeric(s.next(6))) + header.Mode = rd.octal(s.next(8)) + header.Size = rd.numeric(s.next(10)) + + rd.nb = int64(header.Size) + if header.Size%2 == 1 { + rd.pad = 1 + } else { + rd.pad = 0 + } + + return header, nil +} + +// Call Next() to skip to the next file in the archive file. +// Returns a Header which contains the metadata about the +// file in the archive. +func (rd *Reader) Next() (*Header, error) { + err := rd.skipUnread() + if err != nil { + return nil, err + } + + return rd.readHeader() +} + +// Read data from the current entry in the archive. +func (rd *Reader) Read(b []byte) (n int, err error) { + if rd.nb == 0 { + return 0, io.EOF + } + if int64(len(b)) > rd.nb { + b = b[0:rd.nb] + } + n, err = rd.r.Read(b) + rd.nb -= int64(n) + + return +} diff --git a/vendor/github.com/blakesmith/ar/writer.go b/vendor/github.com/blakesmith/ar/writer.go new file mode 100644 index 0000000000..680f5cbc74 --- /dev/null +++ b/vendor/github.com/blakesmith/ar/writer.go @@ -0,0 +1,124 @@ +/* +Copyright (c) 2013 Blake Smith + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ +package ar + +import ( + "errors" + "io" + "strconv" +) + +var ( + ErrWriteTooLong = errors.New("ar: write too long") +) + +// Writer provides sequential writing of an ar archive. +// An ar archive is sequence of header file pairs +// Call WriteHeader to begin writing a new file, then call Write to supply the file's data +// +// Example: +// archive := ar.NewWriter(writer) +// archive.WriteGlobalHeader() +// header := new(ar.Header) +// header.Size = 15 // bytes +// if err := archive.WriteHeader(header); err != nil { +// return err +// } +// io.Copy(archive, data) +type Writer struct { + w io.Writer + nb int64 // number of unwritten bytes for the current file entry +} + +// Create a new ar writer that writes to w +func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } + +func (aw *Writer) numeric(b []byte, x int64) { + s := strconv.FormatInt(x, 10) + for len(s) < len(b) { + s = s + " " + } + copy(b, []byte(s)) +} + +func (aw *Writer) octal(b []byte, x int64) { + s := "100" + strconv.FormatInt(x, 8) + for len(s) < len(b) { + s = s + " " + } + copy(b, []byte(s)) +} + +func (aw *Writer) string(b []byte, str string) { + s := str + for len(s) < len(b) { + s = s + " " + } + copy(b, []byte(s)) +} + +// Writes to the current entry in the ar archive +// Returns ErrWriteTooLong if more than header.Size +// bytes are written after a call to WriteHeader +func (aw *Writer) Write(b []byte) (n int, err error) { + if int64(len(b)) > aw.nb { + b = b[0:aw.nb] + err = ErrWriteTooLong + } + n, werr := aw.w.Write(b) + aw.nb -= int64(n) + if werr != nil { + return n, werr + } + + if len(b)%2 == 1 { // data size must be aligned to an even byte + n2, _ := aw.w.Write([]byte{'\n'}) + return n+n2, err + } + + return +} + +func (aw *Writer) WriteGlobalHeader() error { + _, err := aw.w.Write([]byte(GLOBAL_HEADER)) + return err +} + +// Writes the header to the underlying writer and prepares +// to receive the file payload +func (aw *Writer) WriteHeader(hdr *Header) error { + aw.nb = int64(hdr.Size) + header := make([]byte, HEADER_BYTE_SIZE) + s := slicer(header) + + aw.string(s.next(16), hdr.Name) + aw.numeric(s.next(12), hdr.ModTime.Unix()) + aw.numeric(s.next(6), int64(hdr.Uid)) + aw.numeric(s.next(6), int64(hdr.Gid)) + aw.octal(s.next(8), hdr.Mode) + aw.numeric(s.next(10), hdr.Size) + aw.string(s.next(2), "`\n") + + _, err := aw.w.Write(header) + + return err +} diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufcli/bufcli.go b/vendor/github.com/bufbuild/buf/private/buf/bufcli/bufcli.go index 77be6b2e6f..d99b1a0587 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufcli/bufcli.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufcli/bufcli.go @@ -22,6 +22,7 @@ import ( "io" "net/http" "os" + "strconv" "strings" "github.com/bufbuild/buf/private/buf/bufapp" @@ -50,6 +51,7 @@ import ( "github.com/bufbuild/buf/private/pkg/filelock" "github.com/bufbuild/buf/private/pkg/git" "github.com/bufbuild/buf/private/pkg/httpauth" + "github.com/bufbuild/buf/private/pkg/netrc" "github.com/bufbuild/buf/private/pkg/normalpath" 
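Stepping back to the blakesmith/ar package vendored above: its Reader and Writer compose into a round trip through an in-memory archive. A minimal sketch with a hypothetical file name and payload (errors handled with panic only for brevity):

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"time"

		"github.com/blakesmith/ar"
	)

	func main() {
		var buf bytes.Buffer

		// Write a one-entry archive; Header.Size must match the payload length.
		w := ar.NewWriter(&buf)
		if err := w.WriteGlobalHeader(); err != nil {
			panic(err)
		}
		body := []byte("hello, ar\n")
		hdr := &ar.Header{Name: "hello.txt", ModTime: time.Now(), Mode: 0644, Size: int64(len(body))}
		if err := w.WriteHeader(hdr); err != nil {
			panic(err)
		}
		if _, err := w.Write(body); err != nil {
			panic(err)
		}

		// Read it back; NewReader discards the global header for us.
		r := ar.NewReader(&buf)
		for {
			hdr, err := r.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
			fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
			io.Copy(io.Discard, r) // drain the entry (Next would also skip any unread bytes)
		}
	}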
"github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storageos" @@ -63,7 +65,7 @@ import ( const ( // Version is the CLI version of buf. - Version = "1.12.0" + Version = "1.14.0" inputHTTPSUsernameEnvKey = "BUF_INPUT_HTTPS_USERNAME" inputHTTPSPasswordEnvKey = "BUF_INPUT_HTTPS_PASSWORD" @@ -73,6 +75,9 @@ const ( alphaSuppressWarningsEnvKey = "BUF_ALPHA_SUPPRESS_WARNINGS" betaSuppressWarningsEnvKey = "BUF_BETA_SUPPRESS_WARNINGS" + // BetaEnableTamperProofingEnvKey is an env var to enable tamper proofing + BetaEnableTamperProofingEnvKey = "BUF_BETA_ENABLE_TAMPER_PROOFING" + inputHashtagFlagName = "__hashtag__" inputHashtagFlagShortName = "#" @@ -174,9 +179,9 @@ func BindAsFileDescriptorSet(flagSet *pflag.FlagSet, addr *bool, flagName string addr, flagName, false, - `Output as a google.protobuf.FileDescriptorSet instead of an image. + `Output as a google.protobuf.FileDescriptorSet instead of an image Note that images are wire compatible with FileDescriptorSets, but this flag strips -the additional metadata added for Buf usage.`, +the additional metadata added for Buf usage`, ) } @@ -196,7 +201,7 @@ func BindExcludeSourceInfo(flagSet *pflag.FlagSet, addr *bool, flagName string) addr, flagName, false, - "Exclude source info.", + "Exclude source info", ) } @@ -210,8 +215,8 @@ func BindPaths( pathsAddr, pathsFlagName, nil, - `Limit to specific files or directories, for example "proto/a/a.proto" or "proto/a". -If specified multiple times, the union is taken.`, + `Limit to specific files or directories, e.g. "proto/a/a.proto", "proto/a" +If specified multiple times, the union is taken`, ) } @@ -241,8 +246,8 @@ func BindExcludePaths( excludePathsAddr, excludePathsFlagName, nil, - `Exclude specific files or directories, for example "proto/a/a.proto" or "proto/a". -If specified multiple times, the union is taken.`, + `Exclude specific files or directories, e.g. "proto/a/a.proto", "proto/a" +If specified multiple times, the union is taken`, ) } @@ -252,8 +257,8 @@ func BindDisableSymlinks(flagSet *pflag.FlagSet, addr *bool, flagName string) { addr, flagName, false, - `Do not follow symlinks when reading sources or configuration from the local filesystem. 
-By default, symlinks are followed in this CLI, but never followed on the Buf Schema Registry.`, + `Do not follow symlinks when reading sources or configuration from the local filesystem +By default, symlinks are followed in this CLI, but never followed on the Buf Schema Registry`, ) } @@ -388,7 +393,7 @@ func NewWireImageConfigReader( logger, storageosProvider, newFetchReader(logger, storageosProvider, runner, moduleResolver, moduleReader), - bufmodulebuild.NewModuleBucketBuilder(logger), + bufmodulebuild.NewModuleBucketBuilder(), bufmodulebuild.NewModuleFileSetBuilder(logger, moduleReader), bufimagebuild.NewBuilder(logger), ), nil @@ -414,7 +419,7 @@ func NewWireModuleConfigReader( logger, storageosProvider, newFetchReader(logger, storageosProvider, runner, moduleResolver, moduleReader), - bufmodulebuild.NewModuleBucketBuilder(logger), + bufmodulebuild.NewModuleBucketBuilder(), ), nil } @@ -436,7 +441,7 @@ func NewWireModuleConfigReaderForModuleReader( logger, storageosProvider, newFetchReader(logger, storageosProvider, runner, moduleResolver, moduleReader), - bufmodulebuild.NewModuleBucketBuilder(logger), + bufmodulebuild.NewModuleBucketBuilder(), ), nil } @@ -460,7 +465,7 @@ func NewWireFileLister( logger, storageosProvider, newFetchReader(logger, storageosProvider, runner, moduleResolver, moduleReader), - bufmodulebuild.NewModuleBucketBuilder(logger), + bufmodulebuild.NewModuleBucketBuilder(), bufmodulebuild.NewModuleFileSetBuilder(logger, moduleReader), bufimagebuild.NewBuilder(logger), ), nil @@ -539,7 +544,7 @@ func NewModuleReaderAndCreateCacheDirsWithExternalPaths( func newModuleReaderAndCreateCacheDirs( container appflag.Container, clientConfig *connectclient.Config, - moduleReaderOptions ...bufmodulecache.ModuleReaderOption, + cacheModuleReaderOpts ...bufmodulecache.ModuleReaderOption, ) (bufmodule.ModuleReader, error) { cacheModuleDataDirPath := normalpath.Join(container.CacheDirPath(), v1CacheModuleDataRelDirPath) cacheModuleLockDirPath := normalpath.Join(container.CacheDirPath(), v1CacheModuleLockRelDirPath) @@ -575,15 +580,27 @@ func newModuleReaderAndCreateCacheDirs( if err != nil { return nil, err } + var moduleReaderOpts []bufapimodule.ModuleReaderOption + // Check if tamper proofing env var is enabled + tamperProofingEnabled, err := IsBetaTamperProofingEnabled(container) + if err != nil { + return nil, err + } + if tamperProofingEnabled { + moduleReaderOpts = append(moduleReaderOpts, bufapimodule.WithTamperProofing()) + } moduleReader := bufmodulecache.NewModuleReader( container.Logger(), container.VerbosePrinter(), fileLocker, dataReadWriteBucket, sumReadWriteBucket, - bufapimodule.NewModuleReader(bufapimodule.NewDownloadServiceClientFactory(clientConfig)), + bufapimodule.NewModuleReader( + bufapimodule.NewDownloadServiceClientFactory(clientConfig), + moduleReaderOpts..., + ), bufmodulecache.NewRepositoryServiceClientFactory(clientConfig), - moduleReaderOptions..., + cacheModuleReaderOpts..., ) return moduleReader, nil } @@ -629,10 +646,15 @@ func newConnectClientConfigWithOptions(container appflag.Container, opts ...conn // up the token in the container or in netrc based on the address of each individual client. // It is then set in the header of all outgoing requests from clients created using this config. 
func NewConnectClientConfig(container appflag.Container) (*connectclient.Config, error) { + envTokenProvider, err := bufconnect.NewTokenProviderFromContainer(container) + if err != nil { + return nil, err + } + netrcTokenProvider := bufconnect.NewNetrcTokenProvider(container, netrc.GetMachineForName) return newConnectClientConfigWithOptions( container, connectclient.WithAuthInterceptorProvider( - bufconnect.NewAuthorizationInterceptorProvider(container), + bufconnect.NewAuthorizationInterceptorProvider(envTokenProvider, netrcTokenProvider), ), ) } @@ -640,10 +662,14 @@ func NewConnectClientConfig(container appflag.Container) (*connectclient.Config, // NewConnectClientConfigWithToken creates a new connect.ClientConfig with a given token. The provided token is // set in the header of all outgoing requests from this provider func NewConnectClientConfigWithToken(container appflag.Container, token string) (*connectclient.Config, error) { + tokenProvider, err := bufconnect.NewTokenProviderFromString(token) + if err != nil { + return nil, err + } return newConnectClientConfigWithOptions( container, connectclient.WithAuthInterceptorProvider( - bufconnect.NewAuthorizationInterceptorProviderWithToken(token), + bufconnect.NewAuthorizationInterceptorProvider(tokenProvider), ), ) } @@ -750,20 +776,6 @@ func BucketAndConfigForSource( return sourceBucket, sourceConfig, nil } -// ReadModule gets a module from a source bucket and its config. -func ReadModule( - ctx context.Context, - logger *zap.Logger, - sourceBucket storage.ReadBucketCloser, - sourceConfig *bufconfig.Config, -) (bufmodule.Module, error) { - return bufmodulebuild.NewModuleBucketBuilder(logger).BuildForBucket( - ctx, - sourceBucket, - sourceConfig.Build, - ) -} - // NewImageForSource resolves a single bufimage.Image from the user-provided source with the build options. func NewImageForSource( ctx context.Context, @@ -836,7 +848,7 @@ func WellKnownTypeImage(ctx context.Context, logger *zap.Logger, wellKnownType s if err != nil { return nil, err } - module, err := bufmodulebuild.NewModuleBucketBuilder(logger).BuildForBucket( + module, err := bufmodulebuild.BuildForBucket( ctx, datawkt.ReadBucket, sourceConfig.Build, @@ -878,6 +890,20 @@ func VisibilityFlagToVisibilityAllowUnspecified(visibility string) (registryv1al } } +// IsBetaTamperProofingEnabled returns if BUF_BETA_ENABLE_TAMPER_PROOFING is set to true. +func IsBetaTamperProofingEnabled(container app.EnvContainer) (bool, error) { + // Check if tamper proofing env var is enabled + tamperProofingEnabled := false + if envVal := container.Env(BetaEnableTamperProofingEnvKey); envVal != "" { + var err error + tamperProofingEnabled, err = strconv.ParseBool(envVal) + if err != nil { + return false, fmt.Errorf("invalid value for %q: %w", BetaEnableTamperProofingEnvKey, err) + } + } + return tamperProofingEnabled, nil +} + // ValidateErrorFormatFlag validates the error format flag for all commands but lint. 
func ValidateErrorFormatFlag(errorFormatString string, errorFormatFlagName string) error { return validateErrorFormatFlag(bufanalysis.AllFormatStrings, errorFormatString, errorFormatFlagName) diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufcli/errors.go b/vendor/github.com/bufbuild/buf/private/buf/bufcli/errors.go index d12abe3f71..a877f2574b 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufcli/errors.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufcli/errors.go @@ -195,7 +195,7 @@ func wrapError(err error) error { return fmt.Errorf(`%s Are you sure "%s" is a valid remote address?`, msg, strings.TrimPrefix(dnsError.Name, buftransport.APISubdomain+".")) } - return fmt.Errorf(msg) + return errors.New(msg) } return fmt.Errorf("Failure: %s", connectErr.Message()) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufconvert/bufconvert.go b/vendor/github.com/bufbuild/buf/private/buf/bufconvert/bufconvert.go index 16c060c45c..37756feb3a 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufconvert/bufconvert.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufconvert/bufconvert.go @@ -23,7 +23,8 @@ import ( "github.com/bufbuild/buf/private/buf/bufref" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/stringutil" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" ) const ( @@ -64,10 +65,12 @@ func NewMessageEncodingRef( value string, defaultEncoding MessageEncoding, ) (MessageEncodingRef, error) { - ctx, span := trace.StartSpan(ctx, "new_message_encoding_ref") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "new_message_encoding_ref") defer span.End() path, messageEncoding, err := getPathAndMessageEncoding(ctx, value, defaultEncoding) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return nil, err } return newMessageEncodingRef(path, messageEncoding), nil diff --git a/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/errors.go b/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/errors.go index 0915a7cc94..d0cd27f729 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/errors.go +++ b/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/errors.go @@ -40,12 +40,12 @@ func NewFormatCannotBeDeterminedError(value string) error { // NewCannotSpecifyGitBranchAndTagError is a fetch error. func NewCannotSpecifyGitBranchAndTagError() error { - return fmt.Errorf(`must specify only one of "branch", "tag"`) + return errors.New(`must specify only one of "branch", "tag"`) } // NewCannotSpecifyTagWithRefError is a fetch error. func NewCannotSpecifyTagWithRefError() error { - return fmt.Errorf(`cannot specify "tag" with "ref"`) + return errors.New(`cannot specify "tag" with "ref"`) } // NewDepthParseError is a fetch error. @@ -55,7 +55,7 @@ func NewDepthParseError(s string) error { // NewDepthZeroError is a fetch error. func NewDepthZeroError() error { - return fmt.Errorf(`"depth" must be >0 if specified`) + return errors.New(`"depth" must be >0 if specified`) } // NewPathUnknownGzError is a fetch error. 
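A pattern worth noting in the bufcli and buffetch hunks above: fmt.Errorf calls with no format verbs are replaced by errors.New. Besides being marginally cheaper, this avoids treating the message as a format string, and recent go vet printf checks flag non-constant format strings. A small illustrative sketch (the message text here is hypothetical):

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		msg := `expected "100%" coverage`

		// fmt.Errorf interprets the message as a format string, so the stray
		// '%' is mangled into a %! error artifact in the output.
		fmt.Println(fmt.Errorf(msg))

		// errors.New stores the message verbatim.
		fmt.Println(errors.New(msg))
	}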
diff --git a/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/reader.go b/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/reader.go index 5240b7025d..2c30afd51b 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/reader.go +++ b/vendor/github.com/bufbuild/buf/private/buf/buffetch/internal/reader.go @@ -37,7 +37,9 @@ import ( "github.com/bufbuild/buf/private/pkg/storage/storageos" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -59,6 +61,7 @@ type reader struct { moduleEnabled bool moduleReader bufmodule.ModuleReader moduleResolver bufmodule.ModuleResolver + tracer trace.Tracer } func newReader( @@ -69,6 +72,7 @@ func newReader( reader := &reader{ logger: logger, storageosProvider: storageosProvider, + tracer: otel.GetTracerProvider().Tracer("bufbuild/buf"), } for _, option := range options { option(reader) @@ -202,12 +206,16 @@ func (r *reader) getArchiveBucket( if err != nil { return nil, err } + readWriteBucket := storagemem.NewReadWriteBucket() + ctx, span := r.tracer.Start(ctx, "unarchive") + defer span.End() defer func() { retErr = multierr.Append(retErr, readCloser.Close()) + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } }() - readWriteBucket := storagemem.NewReadWriteBucket() - ctx, span := trace.StartSpan(ctx, "unarchive") - defer span.End() switch archiveType := archiveRef.ArchiveType(); archiveType { case ArchiveTypeTar: if err := storagearchive.Untar( diff --git a/vendor/github.com/bufbuild/buf/private/buf/buffetch/ref_parser.go b/vendor/github.com/bufbuild/buf/private/buf/buffetch/ref_parser.go index 0c6d04393c..7f4a179f37 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/buffetch/ref_parser.go +++ b/vendor/github.com/bufbuild/buf/private/buf/buffetch/ref_parser.go @@ -25,14 +25,22 @@ import ( "github.com/bufbuild/buf/private/buf/buffetch/internal" "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref" "github.com/bufbuild/buf/private/pkg/app" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" ) +const ( + loggerName = "buffetch" + tracerName = "bufbuild/buf" +) + type refParser struct { allowProtoFileRef bool logger *zap.Logger fetchRefParser internal.RefParser + tracer trace.Tracer } func newRefParser(logger *zap.Logger, options ...RefParserOption) *refParser { @@ -78,7 +86,8 @@ func newRefParser(logger *zap.Logger, options ...RefParserOption) *refParser { if refParser.allowProtoFileRef { fetchRefParserOptions = append(fetchRefParserOptions, internal.WithProtoFileFormat(formatProtoFile)) } - refParser.logger = logger.Named("buffetch") + refParser.logger = logger.Named(loggerName) + refParser.tracer = otel.GetTracerProvider().Tracer(tracerName) refParser.fetchRefParser = internal.NewRefParser( logger, fetchRefParserOptions..., @@ -88,7 +97,7 @@ func newRefParser(logger *zap.Logger, options ...RefParserOption) *refParser { func newImageRefParser(logger *zap.Logger) *refParser { return &refParser{ - logger: logger.Named("buffetch"), + logger: logger.Named(loggerName), fetchRefParser: internal.NewRefParser( logger, internal.WithRawRefProcessor(processRawRefImage), @@ -107,12 +116,13 @@ func newImageRefParser(logger *zap.Logger) *refParser { ), ), ), + tracer: 
otel.GetTracerProvider().Tracer(tracerName), } } func newSourceRefParser(logger *zap.Logger) *refParser { return &refParser{ - logger: logger.Named("buffetch"), + logger: logger.Named(loggerName), fetchRefParser: internal.NewRefParser( logger, internal.WithRawRefProcessor(processRawRefSource), @@ -134,23 +144,25 @@ func newSourceRefParser(logger *zap.Logger) *refParser { internal.WithGitFormat(formatGit), internal.WithDirFormat(formatDir), ), + tracer: otel.GetTracerProvider().Tracer(tracerName), } } func newModuleRefParser(logger *zap.Logger) *refParser { return &refParser{ - logger: logger.Named("buffetch"), + logger: logger.Named(loggerName), fetchRefParser: internal.NewRefParser( logger, internal.WithRawRefProcessor(processRawRefModule), internal.WithModuleFormat(formatMod), ), + tracer: otel.GetTracerProvider().Tracer(tracerName), } } func newSourceOrModuleRefParser(logger *zap.Logger) *refParser { return &refParser{ - logger: logger.Named("buffetch"), + logger: logger.Named(loggerName), fetchRefParser: internal.NewRefParser( logger, internal.WithRawRefProcessor(processRawRefSourceOrModule), @@ -173,15 +185,22 @@ func newSourceOrModuleRefParser(logger *zap.Logger) *refParser { internal.WithDirFormat(formatDir), internal.WithModuleFormat(formatMod), ), + tracer: otel.GetTracerProvider().Tracer(tracerName), } } func (a *refParser) GetRef( ctx context.Context, value string, -) (Ref, error) { - ctx, span := trace.StartSpan(ctx, "get_ref") +) (_ Ref, retErr error) { + ctx, span := a.tracer.Start(ctx, "get_ref") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() parsedRef, err := a.getParsedRef(ctx, value, allFormats) if err != nil { return nil, err @@ -211,16 +230,21 @@ func (a *refParser) GetRef( func (a *refParser) GetSourceOrModuleRef( ctx context.Context, value string, -) (SourceOrModuleRef, error) { - ctx, span := trace.StartSpan(ctx, "get_source_or_module_ref") +) (_ SourceOrModuleRef, retErr error) { + ctx, span := a.tracer.Start(ctx, "get_source_or_module_ref") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() parsedRef, err := a.getParsedRef(ctx, value, sourceOrModuleFormats) if err != nil { return nil, err } switch t := parsedRef.(type) { case internal.ParsedSingleRef: - // this should never happen return nil, fmt.Errorf("invalid ParsedRef type for source or module: %T", parsedRef) case internal.ParsedArchiveRef: return newSourceRef(t), nil @@ -240,16 +264,21 @@ func (a *refParser) GetSourceOrModuleRef( func (a *refParser) GetImageRef( ctx context.Context, value string, -) (ImageRef, error) { - ctx, span := trace.StartSpan(ctx, "get_image_ref") +) (_ ImageRef, retErr error) { + ctx, span := a.tracer.Start(ctx, "get_image_ref") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() parsedRef, err := a.getParsedRef(ctx, value, imageFormats) if err != nil { return nil, err } parsedSingleRef, ok := parsedRef.(internal.ParsedSingleRef) if !ok { - // this should never happen return nil, fmt.Errorf("invalid ParsedRef type for image: %T", parsedRef) } imageEncoding, err := parseImageEncoding(parsedSingleRef.Format()) @@ -262,9 +291,15 @@ func (a *refParser) GetImageRef( func (a *refParser) GetSourceRef( ctx context.Context, value string, -) (SourceRef, error) { - ctx, span := trace.StartSpan(ctx, "get_source_ref") +) (_ SourceRef, 
retErr error) { + ctx, span := a.tracer.Start(ctx, "get_source_ref") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() parsedRef, err := a.getParsedRef(ctx, value, sourceFormats) if err != nil { return nil, err @@ -280,9 +315,15 @@ func (a *refParser) GetSourceRef( func (a *refParser) GetModuleRef( ctx context.Context, value string, -) (ModuleRef, error) { - ctx, span := trace.StartSpan(ctx, "get_source_ref") +) (_ ModuleRef, retErr error) { + ctx, span := a.tracer.Start(ctx, "get_source_ref") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() parsedRef, err := a.getParsedRef(ctx, value, moduleFormats) if err != nil { return nil, err diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufgen/bufgen.go b/vendor/github.com/bufbuild/buf/private/buf/bufgen/bufgen.go index 33a7fab71d..fdc8e2d1a9 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufgen/bufgen.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufgen/bufgen.go @@ -181,9 +181,11 @@ type PluginConfig struct { // Optional Opt string // Optional, exclusive with Remote - Path string + Path []string // Required Strategy Strategy + // Optional + ProtocPath string } // PluginName returns this PluginConfig's plugin name. @@ -343,14 +345,15 @@ type ExternalConfigV1 struct { // ExternalPluginConfigV1 is an external plugin configuration. type ExternalPluginConfigV1 struct { - Plugin string `json:"plugin,omitempty" yaml:"plugin,omitempty"` - Revision int `json:"revision,omitempty" yaml:"revision,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Remote string `json:"remote,omitempty" yaml:"remote,omitempty"` - Out string `json:"out,omitempty" yaml:"out,omitempty"` - Opt interface{} `json:"opt,omitempty" yaml:"opt,omitempty"` - Path string `json:"path,omitempty" yaml:"path,omitempty"` - Strategy string `json:"strategy,omitempty" yaml:"strategy,omitempty"` + Plugin string `json:"plugin,omitempty" yaml:"plugin,omitempty"` + Revision int `json:"revision,omitempty" yaml:"revision,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Remote string `json:"remote,omitempty" yaml:"remote,omitempty"` + Out string `json:"out,omitempty" yaml:"out,omitempty"` + Opt interface{} `json:"opt,omitempty" yaml:"opt,omitempty"` + Path interface{} `json:"path,omitempty" yaml:"path,omitempty"` + ProtocPath string `json:"protoc_path,omitempty" yaml:"protoc_path,omitempty"` + Strategy string `json:"strategy,omitempty" yaml:"strategy,omitempty"` } // ExternalManagedConfigV1 is an external managed mode configuration. 
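The bufgen change above relaxes the plugin path field from string to interface{} so a buf.gen.yaml entry can supply either a single binary path or an argv-style list (and adds protoc_path alongside it). The normalization it relies on, encoding.InterfaceSliceOrStringToStringSlice, reduces to roughly the following sketch (a simplified stand-in, not the vendored implementation; the plugin names are hypothetical):

	package main

	import "fmt"

	// normalizePath mimics the string-or-list handling that config.go applies
	// to ExternalPluginConfigV1.Path before building PluginConfig.Path.
	func normalizePath(v interface{}) ([]string, error) {
		switch t := v.(type) {
		case nil:
			return nil, nil
		case string:
			return []string{t}, nil
		case []interface{}:
			out := make([]string, 0, len(t))
			for _, e := range t {
				s, ok := e.(string)
				if !ok {
					return nil, fmt.Errorf("expected string, got %T", e)
				}
				out = append(out, s)
			}
			return out, nil
		default:
			return nil, fmt.Errorf("expected string or list of strings, got %T", v)
		}
	}

	func main() {
		// Both YAML shapes land in the same []string:
		//   path: bin/protoc-gen-foo
		//   path: [bin/protoc-gen-foo, --debug]
		for _, v := range []interface{}{
			"bin/protoc-gen-foo",
			[]interface{}{"bin/protoc-gen-foo", "--debug"},
		} {
			fmt.Println(normalizePath(v))
		}
	}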
diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufgen/config.go b/vendor/github.com/bufbuild/buf/private/buf/bufgen/config.go index 66a6f80828..e92af3c6cf 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufgen/config.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufgen/config.go @@ -143,15 +143,20 @@ func newConfigV1(logger *zap.Logger, externalConfig ExternalConfigV1, id string) if err != nil { return nil, err } + path, err := encoding.InterfaceSliceOrStringToStringSlice(plugin.Path) + if err != nil { + return nil, err + } pluginConfig := &PluginConfig{ - Plugin: plugin.Plugin, - Revision: plugin.Revision, - Name: plugin.Name, - Remote: plugin.Remote, - Out: plugin.Out, - Opt: opt, - Path: plugin.Path, - Strategy: strategy, + Plugin: plugin.Plugin, + Revision: plugin.Revision, + Name: plugin.Name, + Remote: plugin.Remote, + Out: plugin.Out, + Opt: opt, + Path: path, + ProtocPath: plugin.ProtocPath, + Strategy: strategy, } if pluginConfig.IsRemote() { // Always use StrategyAll for remote plugins @@ -228,12 +233,15 @@ func validateExternalConfigV1(externalConfig ExternalConfigV1, id string) error } func checkPathAndStrategyUnset(id string, plugin ExternalPluginConfigV1, pluginIdentifier string) error { - if plugin.Path != "" { + if plugin.Path != nil { return fmt.Errorf("%s: remote plugin %s cannot specify a path", id, pluginIdentifier) } if plugin.Strategy != "" { return fmt.Errorf("%s: remote plugin %s cannot specify a strategy", id, pluginIdentifier) } + if plugin.ProtocPath != "" { + return fmt.Errorf("%s: remote plugin %s cannot specify a protoc path", id, pluginIdentifier) + } return nil } @@ -578,7 +586,7 @@ func newConfigV1Beta1(externalConfig ExternalConfigV1Beta1, id string) (*Config, Name: plugin.Name, Out: plugin.Out, Opt: opt, - Path: plugin.Path, + Path: []string{plugin.Path}, Strategy: strategy, }, ) diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufgen/generator.go b/vendor/github.com/bufbuild/buf/private/buf/bufgen/generator.go index 8f35f734cd..5c125042dd 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufgen/generator.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufgen/generator.go @@ -200,6 +200,8 @@ func (g *generator) execPlugins( } // Batch for each remote. 
for remote, indexedPluginConfigs := range remotePluginConfigTable { + remote := remote + indexedPluginConfigs := indexedPluginConfigs v1Args := make([]*remotePluginExecArgs, 0, len(indexedPluginConfigs)) v2Args := make([]*remotePluginExecArgs, 0, len(indexedPluginConfigs)) for _, param := range indexedPluginConfigs { @@ -304,7 +306,8 @@ func (g *generator) execLocalPlugin( includeImports, includeWellKnownTypes, ), - appprotoexec.GenerateWithPluginPath(pluginConfig.Path), + appprotoexec.GenerateWithPluginPath(pluginConfig.Path...), + appprotoexec.GenerateWithProtocPath(pluginConfig.ProtocPath), ) if err != nil { return nil, fmt.Errorf("plugin %s: %v", pluginConfig.PluginName(), err) diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufgen/provider.go b/vendor/github.com/bufbuild/buf/private/buf/bufgen/provider.go index e79ccf5413..79698151d5 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufgen/provider.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufgen/provider.go @@ -20,24 +20,34 @@ import ( "github.com/bufbuild/buf/private/pkg/encoding" "github.com/bufbuild/buf/private/pkg/storage" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" ) type provider struct { logger *zap.Logger + tracer trace.Tracer } func newProvider(logger *zap.Logger) *provider { return &provider{ logger: logger, + tracer: otel.GetTracerProvider().Tracer("bufbuild/buf"), } } func (p *provider) GetConfig(ctx context.Context, readBucket storage.ReadBucket) (_ *Config, retErr error) { - ctx, span := trace.StartSpan(ctx, "get_config") + ctx, span := p.tracer.Start(ctx, "get_config") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() readObjectCloser, err := readBucket.Get(ctx, ExternalConfigFilePath) if err != nil { diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufmigrate/v1beta1_migrator.go b/vendor/github.com/bufbuild/buf/private/buf/bufmigrate/v1beta1_migrator.go index c95d199b27..8b62262419 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufmigrate/v1beta1_migrator.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufmigrate/v1beta1_migrator.go @@ -360,14 +360,16 @@ func (m *v1beta1Migrator) maybeMigrateGenTemplate(dirPath string) (bool, error) }, } for _, plugin := range v1beta1GenTemplate.Plugins { - v1GenTemplate.Plugins = append( - v1GenTemplate.Plugins, bufgen.ExternalPluginConfigV1{ - Name: plugin.Name, - Out: plugin.Out, - Opt: plugin.Opt, - Path: plugin.Path, - Strategy: plugin.Strategy, - }) + pluginConfig := bufgen.ExternalPluginConfigV1{ + Name: plugin.Name, + Out: plugin.Out, + Opt: plugin.Opt, + Strategy: plugin.Strategy, + } + if plugin.Path != "" { + pluginConfig.Path = plugin.Path + } + v1GenTemplate.Plugins = append(v1GenTemplate.Plugins, pluginConfig) } newConfigPath := filepath.Join(dirPath, bufgen.ExternalConfigFilePath) if err := m.writeV1GenTemplate(newConfigPath, v1GenTemplate); err != nil { diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/file_lister.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/file_lister.go index 3df9a9b029..30c93deff5 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwire/file_lister.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/file_lister.go @@ -258,7 +258,7 @@ func (e *fileLister) sourceFileInfosForDirectory( if err != nil { return nil, err } - module, err := 
e.moduleBucketBuilder.BuildForBucket( + module, err := bufmodulebuild.BuildForBucket( ctx, mappedReadBucket, config.Build, diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_config_reader.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_config_reader.go index 398c317c3d..4abed65fc9 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_config_reader.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_config_reader.go @@ -28,7 +28,8 @@ import ( "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/storage/storageos" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/zap" ) @@ -240,7 +241,7 @@ func (i *imageConfigReader) buildModule( moduleFileSet bufmodule.ModuleFileSet, excludeSourceCodeInfo bool, ) (ImageConfig, []bufanalysis.FileAnnotation, error) { - ctx, span := trace.StartSpan(ctx, "build_module") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "build_module") defer span.End() var options []bufimagebuild.BuildOption if excludeSourceCodeInfo { @@ -252,6 +253,8 @@ func (i *imageConfigReader) buildModule( options..., ) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return nil, nil, err } if len(fileAnnotations) > 0 { diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_reader.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_reader.go index f31c54ef22..326a99016b 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_reader.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_reader.go @@ -24,14 +24,22 @@ import ( imagev1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/image/v1" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/protoencoding" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" ) +const ( + loggerName = "bufwire" + tracerName = "bufbuild/buf" +) + type imageReader struct { logger *zap.Logger fetchReader buffetch.ImageReader + tracer trace.Tracer } func newImageReader( @@ -39,8 +47,9 @@ func newImageReader( fetchReader buffetch.ImageReader, ) *imageReader { return &imageReader{ - logger: logger.Named("bufwire"), + logger: logger.Named(loggerName), fetchReader: fetchReader, + tracer: otel.GetTracerProvider().Tracer(tracerName), } } @@ -53,8 +62,14 @@ func (i *imageReader) GetImage( externalDirOrFilePathsAllowNotExist bool, excludeSourceCodeInfo bool, ) (_ bufimage.Image, retErr error) { - ctx, span := trace.StartSpan(ctx, "get_image") + ctx, span := i.tracer.Start(ctx, "get_image") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() readCloser, err := i.fetchReader.GetImageFile(ctx, container, imageRef) if err != nil { return nil, err @@ -73,35 +88,47 @@ func (i *imageReader) GetImage( // See https://github.com/golang/protobuf/issues/1123 // TODO: revisit case buffetch.ImageEncodingBin: - _, span := trace.StartSpan(ctx, "wire_unmarshal") + _, span := i.tracer.Start(ctx, "wire_unmarshal") if err := protoencoding.NewWireUnmarshaler(nil).Unmarshal(data, protoImage); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + span.End() return nil, fmt.Errorf("could not unmarshal image: %v", err) } 
span.End() case buffetch.ImageEncodingJSON: firstProtoImage := &imagev1.Image{} - _, span := trace.StartSpan(ctx, "first_json_unmarshal") + _, span := i.tracer.Start(ctx, "first_json_unmarshal") if err := protoencoding.NewJSONUnmarshaler(nil).Unmarshal(data, firstProtoImage); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + span.End() return nil, fmt.Errorf("could not unmarshal image: %v", err) } // TODO right now, NewResolver sets AllowUnresolvable to true all the time // we want to make this into a check, and we verify if we need this for the individual command span.End() - _, span = trace.StartSpan(ctx, "new_resolver") + _, newResolverSpan := i.tracer.Start(ctx, "new_resolver") resolver, err := protoencoding.NewResolver( bufimage.ProtoImageToFileDescriptors( firstProtoImage, )..., ) if err != nil { + newResolverSpan.RecordError(err) + newResolverSpan.SetStatus(codes.Error, err.Error()) + newResolverSpan.End() return nil, err } - span.End() - _, span = trace.StartSpan(ctx, "second_json_unmarshal") + newResolverSpan.End() + _, jsonUnmarshalSpan := i.tracer.Start(ctx, "second_json_unmarshal") if err := protoencoding.NewJSONUnmarshaler(resolver).Unmarshal(data, protoImage); err != nil { + jsonUnmarshalSpan.RecordError(err) + jsonUnmarshalSpan.SetStatus(codes.Error, err.Error()) + jsonUnmarshalSpan.End() return nil, fmt.Errorf("could not unmarshal image: %v", err) } - span.End() + jsonUnmarshalSpan.End() // we've already re-parsed, by unmarshalling 2x above imageFromProtoOptions = append(imageFromProtoOptions, bufimage.WithNoReparse()) default: diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_writer.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_writer.go index 1854dc3406..3d92db6851 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_writer.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/image_writer.go @@ -22,7 +22,8 @@ import ( "github.com/bufbuild/buf/private/bufpkg/bufimage" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/protoencoding" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -51,8 +52,14 @@ func (i *imageWriter) PutImage( asFileDescriptorSet bool, excludeImports bool, ) (retErr error) { - ctx, span := trace.StartSpan(ctx, "put_image") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "put_image") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() // stop short for performance if imageRef.IsNull() { return nil @@ -87,9 +94,15 @@ func (i *imageWriter) imageMarshal( message proto.Message, image bufimage.Image, imageEncoding buffetch.ImageEncoding, -) ([]byte, error) { - _, span := trace.StartSpan(ctx, "image_marshal") +) (_ []byte, retErr error) { + _, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "image_marshal") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() switch imageEncoding { case buffetch.ImageEncodingBin: return protoencoding.NewWireMarshaler().Marshal(message) diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/module_config_reader.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/module_config_reader.go index bda6ad4493..abfed54ae9 100644 --- 
a/vendor/github.com/bufbuild/buf/private/buf/bufwire/module_config_reader.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/module_config_reader.go @@ -31,7 +31,9 @@ import ( "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storageos" "github.com/bufbuild/buf/private/pkg/stringutil" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -41,6 +43,7 @@ type moduleConfigReader struct { storageosProvider storageos.Provider fetchReader buffetch.Reader moduleBucketBuilder bufmodulebuild.ModuleBucketBuilder + tracer trace.Tracer } func newModuleConfigReader( @@ -54,6 +57,7 @@ func newModuleConfigReader( storageosProvider: storageosProvider, fetchReader: fetchReader, moduleBucketBuilder: moduleBucketBuilder, + tracer: otel.GetTracerProvider().Tracer("bufbuild/buf"), } } @@ -65,9 +69,15 @@ func (m *moduleConfigReader) GetModuleConfigs( externalDirOrFilePaths []string, externalExcludeDirOrFilePaths []string, externalDirOrFilePathsAllowNotExist bool, -) ([]ModuleConfig, error) { - ctx, span := trace.StartSpan(ctx, "get_module_config") +) (_ []ModuleConfig, retErr error) { + ctx, span := m.tracer.Start(ctx, "get_module_config") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() // We construct a new WorkspaceBuilder here so that the cache is only used for a single call. workspaceBuilder := bufwork.NewWorkspaceBuilder(m.moduleBucketBuilder) switch t := sourceOrModuleRef.(type) { @@ -657,7 +667,7 @@ func (m *moduleConfigReader) getModuleConfig( } buildOptions = append(buildOptions, bufmodulebuild.WithExcludePaths(bucketRelPaths)) } - module, err := m.moduleBucketBuilder.BuildForBucket( + module, err := bufmodulebuild.BuildForBucket( ctx, mappedReadBucket, moduleConfig.Build, diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_reader.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_reader.go index b2dc296d09..0922f237fe 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_reader.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_reader.go @@ -26,7 +26,8 @@ import ( "github.com/bufbuild/buf/private/bufpkg/bufreflect" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/protoencoding" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/protobuf/proto" @@ -53,8 +54,14 @@ func (p *protoEncodingReader) GetMessage( typeName string, messageRef bufconvert.MessageEncodingRef, ) (_ proto.Message, retErr error) { - ctx, span := trace.StartSpan(ctx, "get_message") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_message") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() // Currently, this support bin and JSON format. 
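A related detail worth calling out: several signatures above change from a plain `([]ModuleConfig, error)` to `(_ []ModuleConfig, retErr error)`. That is not cosmetic; only a named error result is visible to a deferred closure, which is what lets the deferred RecordError/SetStatus calls observe the value the function actually returns. A hypothetical stand-in:

package main

import (
	"context"
	"errors"
	"fmt"
)

// getThing demonstrates why the result is named: the deferred closure reads
// retErr after the return value has been assigned.
func getThing(ctx context.Context) (_ string, retErr error) {
	defer func() {
		if retErr != nil {
			fmt.Println("observed:", retErr) // e.g. record on a span
		}
	}()
	return "", errors.New("boom")
}

func main() {
	_, _ = getThing(context.Background())
}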
resolver, err := protoencoding.NewResolver( bufimage.ImageToFileDescriptors( @@ -71,7 +78,7 @@ func (p *protoEncodingReader) GetMessage( case bufconvert.MessageEncodingJSON: unmarshaler = protoencoding.NewJSONUnmarshaler(resolver) default: - return nil, fmt.Errorf("unknown message encoding type") + return nil, errors.New("unknown message encoding type") } readCloser := io.NopCloser(container.Stdin()) if messageRef.Path() != "-" { diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_writer.go b/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_writer.go index 718f330ee5..6e0ef76248 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_writer.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwire/proto_encoding_writer.go @@ -16,7 +16,7 @@ package bufwire import ( "context" - "fmt" + "errors" "os" "github.com/bufbuild/buf/private/buf/bufconvert" @@ -66,7 +66,7 @@ func (p *protoEncodingWriter) PutMessage( case bufconvert.MessageEncodingJSON: marshaler = protoencoding.NewJSONMarshalerIndent(resolver) default: - return fmt.Errorf("unknown message encoding type") + return errors.New("unknown message encoding type") } data, err := marshaler.Marshal(message) if err != nil { diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwork/get.go b/vendor/github.com/bufbuild/buf/private/buf/bufwork/get.go index 63c5219d33..c32ca2ab3d 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwork/get.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwork/get.go @@ -24,13 +24,20 @@ import ( "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/stringutil" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/multierr" ) func getConfigForBucket(ctx context.Context, readBucket storage.ReadBucket, relativeRootPath string) (_ *Config, retErr error) { - ctx, span := trace.StartSpan(ctx, "get_workspace_config") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_workspace_config") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() // This will be in the order of precedence. 
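The `fmt.Errorf` to `errors.New` swaps above are a small style/correctness cleanup: the message contains no format verbs, so there is nothing for fmt to interpolate. A throwaway illustration (names here are not from the patch):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Preferred for a constant message: no formatting machinery involved.
	err1 := errors.New("unknown message encoding type")
	// Works, but some linters flag Errorf calls that use no format verbs.
	err2 := fmt.Errorf("unknown message encoding type")
	fmt.Println(err1, err2)
}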
var foundConfigFilePaths []string @@ -77,9 +84,9 @@ func getConfigForBucket(ctx context.Context, readBucket storage.ReadBucket, rela } func getConfigForData(ctx context.Context, data []byte) (*Config, error) { - _, span := trace.StartSpan(ctx, "get_workspace_config_for_data") + _, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_workspace_config_for_data") defer span.End() - return getConfigForDataInternal( + config, err := getConfigForDataInternal( ctx, encoding.UnmarshalJSONOrYAMLNonStrict, encoding.UnmarshalJSONOrYAMLStrict, @@ -87,6 +94,11 @@ func getConfigForData(ctx context.Context, data []byte) (*Config, error) { data, "Configuration data", ) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + return config, err } func getConfigForDataInternal( diff --git a/vendor/github.com/bufbuild/buf/private/buf/bufwork/workspace_builder.go b/vendor/github.com/bufbuild/buf/private/buf/bufwork/workspace_builder.go index 465bf4bd10..7bbdb582e6 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/bufwork/workspace_builder.go +++ b/vendor/github.com/bufbuild/buf/private/buf/bufwork/workspace_builder.go @@ -143,7 +143,7 @@ func (w *workspaceBuilder) BuildWorkspace( if err != nil { return nil, err } - module, err := w.moduleBucketBuilder.BuildForBucket( + module, err := bufmodulebuild.BuildForBucket( ctx, readBucketForDirectory, moduleConfig.Build, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/buf.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/buf.go index 5f8474fe9e..252c8ea88b 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/buf.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/buf.go @@ -105,7 +105,7 @@ func NewRootCommand(name string) *appcmd.Command { return &appcmd.Command{ Use: name, Short: "The Buf CLI", - Long: "A tool for working with Protocol Buffers and managing resources on the Buf Schema Registry (BSR).", + Long: "A tool for working with Protocol Buffers and managing resources on the Buf Schema Registry (BSR)", Version: bufcli.Version, BindPersistentFlags: appcmd.BindMultiple(builder.BindRoot, globalFlags.BindRoot), SubCommands: []*appcmd.Command{ @@ -121,7 +121,7 @@ func NewRootCommand(name string) *appcmd.Command { curl.NewCommand("curl", builder), { Use: "mod", - Short: "Manage Buf modules.", + Short: "Manage Buf modules", SubCommands: []*appcmd.Command{ modinit.NewCommand("init", builder), modprune.NewCommand("prune", builder), @@ -134,7 +134,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "registry", - Short: "Manage assets on the Buf Schema Registry.", + Short: "Manage assets on the Buf Schema Registry", SubCommands: []*appcmd.Command{ registrylogin.NewCommand("login", builder), registrylogout.NewCommand("logout", builder), @@ -142,18 +142,17 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "beta", - Short: "Beta commands. Unstable and likely to change.", + Short: "Beta commands. 
Unstable and likely to change", SubCommands: []*appcmd.Command{ - convert.NewCommand("convert", builder), migratev1beta1.NewCommand("migrate-v1beta1", builder), studioagent.NewCommand("studio-agent", noTimeoutBuilder), { Use: "registry", - Short: "Manage assets on the Buf Schema Registry.", + Short: "Manage assets on the Buf Schema Registry", SubCommands: []*appcmd.Command{ { Use: "organization", - Short: "Manage organizations.", + Short: "Manage organizations", SubCommands: []*appcmd.Command{ organizationcreate.NewCommand("create", builder), organizationget.NewCommand("get", builder), @@ -162,7 +161,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "repository", - Short: "Manage repositories.", + Short: "Manage repositories", SubCommands: []*appcmd.Command{ repositorycreate.NewCommand("create", builder), repositoryget.NewCommand("get", builder), @@ -175,7 +174,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "tag", - Short: "Manage a repository's tags.", + Short: "Manage a repository's tags", SubCommands: []*appcmd.Command{ tagcreate.NewCommand("create", builder), taglist.NewCommand("list", builder), @@ -183,7 +182,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "commit", - Short: "Manage a repository's commits.", + Short: "Manage a repository's commits", SubCommands: []*appcmd.Command{ commitget.NewCommand("get", builder), commitlist.NewCommand("list", builder), @@ -191,7 +190,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "draft", - Short: "Manage a repository's drafts.", + Short: "Manage a repository's drafts", SubCommands: []*appcmd.Command{ draftdelete.NewCommand("delete", builder), draftlist.NewCommand("list", builder), @@ -199,7 +198,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "plugin", - Short: "Manage Protobuf plugins.", + Short: "Manage Protobuf plugins", SubCommands: []*appcmd.Command{ plugincreate.NewCommand("create", builder), pluginlist.NewCommand("list", builder), @@ -208,7 +207,7 @@ func NewRootCommand(name string) *appcmd.Command { pluginundeprecate.NewCommand("undeprecate", builder), { Use: "version", - Short: "Manage Protobuf plugin versions.", + Short: "Manage Protobuf plugin versions", SubCommands: []*appcmd.Command{ pluginversionlist.NewCommand("list", builder), }, @@ -217,7 +216,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "template", - Short: "Manage Protobuf templates on the Buf Schema Registry.", + Short: "Manage Protobuf templates on the Buf Schema Registry", SubCommands: []*appcmd.Command{ templatecreate.NewCommand("create", builder), templatelist.NewCommand("list", builder), @@ -226,7 +225,7 @@ func NewRootCommand(name string) *appcmd.Command { templateundeprecate.NewCommand("undeprecate", builder), { Use: "version", - Short: "Manage Protobuf template versions.", + Short: "Manage Protobuf template versions", SubCommands: []*appcmd.Command{ templateversioncreate.NewCommand("create", builder), templateversionlist.NewCommand("list", builder), @@ -236,7 +235,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "webhook", - Short: "Manage webhooks for a repository on the Buf Schema Registry.", + Short: "Manage webhooks for a repository on the Buf Schema Registry", SubCommands: []*appcmd.Command{ webhookcreate.NewCommand("create", builder), webhookdelete.NewCommand("delete", builder), @@ -249,17 +248,17 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "alpha", - Short: "Alpha commands. 
Unstable and recommended only for experimentation. These may be deleted.", + Short: "Alpha commands. Unstable and recommended only for experimentation. These may be deleted", Hidden: true, SubCommands: []*appcmd.Command{ protoc.NewCommand("protoc", builder), { Use: "registry", - Short: "Manage assets on the Buf Schema Registry.", + Short: "Manage assets on the Buf Schema Registry", SubCommands: []*appcmd.Command{ { Use: "token", - Short: "Manage user tokens.", + Short: "Manage user tokens", SubCommands: []*appcmd.Command{ tokencreate.NewCommand("create", builder), tokenget.NewCommand("get", builder), @@ -271,7 +270,7 @@ func NewRootCommand(name string) *appcmd.Command { }, { Use: "plugin", - Short: "Manage plugins on the Buf Schema Registry.", + Short: "Manage plugins on the Buf Schema Registry", SubCommands: []*appcmd.Command{ pluginpush.NewCommand("push", builder), curatedplugindelete.NewCommand("delete", builder), diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/plugindelete/plugindelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/plugindelete/plugindelete.go index 1840fce851..8b5d9672bd 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/plugindelete/plugindelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/plugindelete/plugindelete.go @@ -39,7 +39,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Delete a plugin from the registry.", + Short: "Delete a plugin from the registry", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/pluginpush/pluginpush.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/pluginpush/pluginpush.go index 4dcebf733c..d83f609c40 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/pluginpush/pluginpush.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/plugin/pluginpush/pluginpush.go @@ -65,7 +65,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Push a plugin to a registry.", + Short: "Push a plugin to a registry", Long: bufcli.GetSourceDirLong(`the source to push (directory containing buf.plugin.yaml or plugin release zip)`), Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( @@ -103,7 +103,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors printed to stderr. Must be one of %s.", + "The format for build errors printed to stderr. 
Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -111,13 +111,13 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.OverrideRemote, overrideRemoteFlagName, "", - "Override the default remote found in buf.plugin.yaml name and dependencies.", + "Override the default remote found in buf.plugin.yaml name and dependencies", ) flagSet.StringVar( &f.Image, imageFlagName, "", - "Existing image to push.", + "Existing image to push", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/plugin.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/plugin.go index 23d6fe8a9a..bdbd760c86 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/plugin.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/plugin.go @@ -51,11 +51,16 @@ func executePlugin( pluginName string, pluginInfo *pluginInfo, ) (*pluginpb.CodeGeneratorResponse, error) { - response, err := appprotoexec.NewGenerator( + generator := appprotoexec.NewGenerator( logger, storageosProvider, runner, - ).Generate( + ) + var options []appprotoexec.GenerateOption + if pluginInfo.Path != "" { + options = append(options, appprotoexec.GenerateWithPluginPath(pluginInfo.Path)) + } + response, err := generator.Generate( ctx, container, pluginName, @@ -66,7 +71,7 @@ func executePlugin( false, false, ), - appprotoexec.GenerateWithPluginPath(pluginInfo.Path), + options..., ) if err != nil { return nil, fmt.Errorf("--%s_out: %v", pluginName, err) diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/protoc.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/protoc.go index 3b18ae7c6b..4dbd2b18ef 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/protoc.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/protoc/protoc.go @@ -34,7 +34,8 @@ import ( "github.com/bufbuild/buf/private/pkg/app/appproto/appprotoos" "github.com/bufbuild/buf/private/pkg/command" "github.com/bufbuild/buf/private/pkg/storage/storageos" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -47,7 +48,7 @@ func NewCommand( flagsBuilder := newFlagsBuilder() return &appcmd.Command{ Use: name + " ...", - Short: "High-performance protoc replacement.", + Short: "High-performance protoc replacement", Long: `This command replaces protoc using Buf's internal compiler. The implementation is in progress. 
Although it outperforms mainline protoc, @@ -181,9 +182,12 @@ func run( if len(env.PluginNameToPluginInfo) > 0 { images := []bufimage.Image{image} if env.ByDir { - _, span := trace.StartSpan(ctx, "image_by_dir") + _, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "image_by_dir") images, err = bufimage.ImageByDir(image) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + span.End() return err } span.End() diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokencreate/tokencreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokencreate/tokencreate.go index a97d99a0ea..2607b8737e 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokencreate/tokencreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokencreate/tokencreate.go @@ -45,7 +45,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a new token for a user.", + Short: "Create a new token for a user", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokendelete/tokendelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokendelete/tokendelete.go index 68f799081d..08647dd423 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokendelete/tokendelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokendelete/tokendelete.go @@ -42,7 +42,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Delete a token by ID.", + Short: "Delete a token by ID", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -68,13 +68,13 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Force, forceFlagName, false, - "Force deletion without confirming. Use with caution.", + "Force deletion without confirming. 
Use with caution", ) flagSet.StringVar( &f.TokenID, tokenIDFlagName, "", - "The ID of the token to delete.", + "The ID of the token to delete", ) _ = cobra.MarkFlagRequired(flagSet, tokenIDFlagName) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenget/tokenget.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenget/tokenget.go index 22c2f083c6..9d35bfc853 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenget/tokenget.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenget/tokenget.go @@ -44,7 +44,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Get a token by ID.", + Short: "Get a token by ID", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -76,7 +76,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.TokenID, tokenIDFlagName, "", - "The ID of the token to get.", + "The ID of the token to get", ) _ = cobra.MarkFlagRequired(flagSet, tokenIDFlagName) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenlist/tokenlist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenlist/tokenlist.go index 9c42ed9402..f406aa62c4 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenlist/tokenlist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/alpha/registry/token/tokenlist/tokenlist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List my tokens.", + Short: "List user tokens", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/migratev1beta1/migratev1beta1.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/migratev1beta1/migratev1beta1.go index 06be53afb2..1bddf81a07 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/migratev1beta1/migratev1beta1.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/migratev1beta1/migratev1beta1.go @@ -33,9 +33,10 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: `Migrate any v1beta1 configuration files in the directory to the latest version.`, - Long: `Defaults to the current directory if not specified.`, - Args: cobra.MaximumNArgs(1), + Short: `Migrate v1beta1 configuration to the latest version`, + Long: `Migrate any v1beta1 configuration files in the directory to the latest version +Defaults to the current directory if not specified`, + Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { return run(ctx, container, flags) diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitget/commitget.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitget/commitget.go index f5214c4611..5266e93a86 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitget/commitget.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitget/commitget.go @@ -40,8 +40,8 @@ func NewCommand( ) *appcmd.Command { flags := newFlags() return 
&appcmd.Command{ - Use: name + " ", - Short: "Get details about a commit.", + Use: name + " ", + Short: "Get commit details", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitlist/commitlist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitlist/commitlist.go index 0076fe829b..98664e14f1 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitlist/commitlist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/commit/commitlist/commitlist.go @@ -45,8 +45,8 @@ func NewCommand( ) *appcmd.Command { flags := newFlags() return &appcmd.Command{ - Use: name + " ", - Short: "List commits.", + Use: name + " ", + Short: "List repository commits", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -73,17 +73,17 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.Uint32Var(&f.PageSize, pageSizeFlagName, 10, - `The page size.`, + `The page size`, ) flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, false, - `Reverse the results.`, + `Reverse the results`, ) flagSet.StringVar( &f.Format, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftdelete/draftdelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftdelete/draftdelete.go index 7eef31b73a..719b23e86c 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftdelete/draftdelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftdelete/draftdelete.go @@ -40,8 +40,8 @@ func NewCommand( ) *appcmd.Command { flags := newFlags() return &appcmd.Command{ - Use: name + " ", - Short: "Delete a draft of a BSR repository by name.", + Use: name + " ", + Short: "Delete a repository draft", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -66,7 +66,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Force, forceFlagName, false, - "Force deletion without confirming. Use with caution.", + "Force deletion without confirming. 
Use with caution", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftlist/draftlist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftlist/draftlist.go index ce5383ecfe..9d4b6ca685 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftlist/draftlist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/draft/draftlist/draftlist.go @@ -43,7 +43,7 @@ func NewCommand(name string, builder appflag.Builder) *appcmd.Command { flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List drafts for the specified repository.", + Short: "List repository drafts", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -70,17 +70,17 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.Uint32Var(&f.PageSize, pageSizeFlagName, 10, - `The page size.`, + `The page size`, ) flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, false, - `Reverse the results.`, + `Reverse the results`, ) flagSet.StringVar( &f.Format, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationcreate/organizationcreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationcreate/organizationcreate.go index 7bacc2771f..30d1005803 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationcreate/organizationcreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationcreate/organizationcreate.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a new organization on the BSR.", + Short: "Create a new BSR organization", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationdelete/organizationdelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationdelete/organizationdelete.go index 2dd5d62690..8bd3d49a55 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationdelete/organizationdelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationdelete/organizationdelete.go @@ -40,7 +40,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Delete an organization by name on the BSR.", + Short: "Delete a BSR organization", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -65,7 +65,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Force, forceFlagName, false, - "Force deletion without confirming. Use with caution.", + "Force deletion without confirming. 
Use with caution", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationget/organizationget.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationget/organizationget.go index 520eb17ec8..2b0eeea54a 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationget/organizationget.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/organization/organizationget/organizationget.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Get an organization on the BSR by name.", + Short: "Get a BSR organization", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugincreate/plugincreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugincreate/plugincreate.go index 0a0afc2070..cf053a9287 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugincreate/plugincreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugincreate/plugincreate.go @@ -53,7 +53,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a new Protobuf plugin.", + Short: "Create a Protobuf plugin", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -79,7 +79,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Visibility, visibilityFlagName, "", - fmt.Sprintf(`The plugin's visibility setting. Must be one of %s.`, stringutil.SliceToString(allVisibiltyStrings)), + fmt.Sprintf(`The plugin's visibility setting. Must be one of %s`, stringutil.SliceToString(allVisibiltyStrings)), ) _ = cobra.MarkFlagRequired(flagSet, visibilityFlagName) flagSet.StringVar( diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindelete/plugindelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindelete/plugindelete.go index 19ddf349ce..27091d3211 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindelete/plugindelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindelete/plugindelete.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Delete a Protobuf plugin by name.", + Short: "Delete a Protobuf plugin", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -66,7 +66,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Force, forceFlagName, false, - "Force deletion without confirming. Use with caution.", + "Force deletion without confirming. 
Use with caution", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindeprecate/plugindeprecate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindeprecate/plugindeprecate.go index 586ae1c29b..79d6400288 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindeprecate/plugindeprecate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/plugindeprecate/plugindeprecate.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Deprecate a plugin by name.", + Short: "Deprecate a Protobuf plugin", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -66,7 +66,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Message, messageFlagName, "", - `The message to display with deprecation warnings.`, + `The message to display with deprecation warnings`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginlist/pluginlist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginlist/pluginlist.go index b7435d5382..086da609fa 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginlist/pluginlist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginlist/pluginlist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List plugins on the specified remote.", + Short: "List plugins on the specified BSR", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -73,17 +73,17 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.Uint32Var(&f.PageSize, pageSizeFlagName, 10, - `The page size.`, + `The page size`, ) flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. 
If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, false, - `Reverse the results.`, + `Reverse the results`, ) flagSet.StringVar( &f.Format, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginundeprecate/pluginundeprecate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginundeprecate/pluginundeprecate.go index c9b90935d6..0aeb31b4f9 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginundeprecate/pluginundeprecate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginundeprecate/pluginundeprecate.go @@ -35,7 +35,7 @@ func NewCommand( ) *appcmd.Command { return &appcmd.Command{ Use: name + " ", - Short: "Undeprecate a plugin by name.", + Short: "Undeprecate a plugin", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc(run, bufcli.NewErrorInterceptor()), } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginversion/pluginversionlist/pluginversionlist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginversion/pluginversionlist/pluginversionlist.go index 9a9d96c385..d76bd15621 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginversion/pluginversionlist/pluginversionlist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/plugin/pluginversion/pluginversionlist/pluginversionlist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List versions for the specified plugin.", + Short: "List plugin versions", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -73,17 +73,17 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.Uint32Var(&f.PageSize, pageSizeFlagName, 10, - `The page size.`, + `The page size`, ) flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, false, - `Reverse the results.`, + `Reverse the results`, ) flagSet.StringVar( &f.Format, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorycreate/repositorycreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorycreate/repositorycreate.go index d8dcbe09d7..78f3329b78 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorycreate/repositorycreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorycreate/repositorycreate.go @@ -44,7 +44,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a new repository on the BSR.", + Short: "Create a BSR repository", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -72,7 +72,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Format, formatFlagName, bufprint.FormatText.String(), - fmt.Sprintf(`The output format to use. 
Must be one of %s.`, bufprint.AllFormatsString), + fmt.Sprintf(`The output format to use. Must be one of %s`, bufprint.AllFormatsString), ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydelete/repositorydelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydelete/repositorydelete.go index 5197e89cd2..d6cf022225 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydelete/repositorydelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydelete/repositorydelete.go @@ -40,7 +40,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Delete a BSR repository by name.", + Short: "Delete a BSR repository", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -65,7 +65,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Force, forceFlagName, false, - "Force deletion without confirming. Use with caution.", + "Force deletion without confirming. Use with caution", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydeprecate/repositorydeprecate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydeprecate/repositorydeprecate.go index 879a95bb59..4745b61f59 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydeprecate/repositorydeprecate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorydeprecate/repositorydeprecate.go @@ -39,7 +39,7 @@ func NewCommand(name string, builder appflag.Builder) *appcmd.Command { flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Deprecate a repository on the BSR.", + Short: "Deprecate a BSR repository", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -64,7 +64,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Message, messageFlagName, "", - `The message to display with deprecation warnings.`, + `The message to display with deprecation warnings`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryget/repositoryget.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryget/repositoryget.go index 3a16bb486d..a7874ec21e 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryget/repositoryget.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryget/repositoryget.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Get a BSR repository by name.", + Short: "Get a BSR repository", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorylist/repositorylist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorylist/repositorylist.go index de9565248f..c26c1f66d4 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorylist/repositorylist.go +++ 
b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositorylist/repositorylist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List BSR repositories.", + Short: "List BSR repositories", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -78,12 +78,12 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, false, - `Reverse the results.`, + `Reverse the results`, ) flagSet.StringVar( &f.Format, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryundeprecate/repositoryundeprecate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryundeprecate/repositoryundeprecate.go index 2d13326b33..3f50027cd3 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryundeprecate/repositoryundeprecate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryundeprecate/repositoryundeprecate.go @@ -33,7 +33,7 @@ import ( func NewCommand(name string, builder appflag.Builder) *appcmd.Command { return &appcmd.Command{ Use: name + " ", - Short: "Undeprecate a BSR repository.", + Short: "Undeprecate a BSR repository", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc(run, bufcli.NewErrorInterceptor()), } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryupdate/repositoryupdate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryupdate/repositoryupdate.go index 0e3cdaf61c..35e0eda7d0 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryupdate/repositoryupdate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/repository/repositoryupdate/repositoryupdate.go @@ -39,7 +39,7 @@ func NewCommand(name string, builder appflag.Builder) *appcmd.Command { flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Update a BSR repository settings.", + Short: "Update BSR repository settings", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/tagcreate/tagcreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/tagcreate/tagcreate.go index 9cb34c1eec..035b278c05 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/tagcreate/tagcreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/tagcreate/tagcreate.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a tag for the specified commit.", + Short: "Create a tag for a specified commit", Args: cobra.ExactArgs(2), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -66,7 +66,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Format, formatFlagName, 
bufprint.FormatText.String(), - fmt.Sprintf(`The output format to use. Must be one of %s.`, bufprint.AllFormatsString), + fmt.Sprintf(`The output format to use. Must be one of %s`, bufprint.AllFormatsString), ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/taglist/taglist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/taglist/taglist.go index 2ca5fa79b7..b6b80972a6 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/taglist/taglist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/tag/taglist/taglist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List tags for the specified repository.", + Short: "List repository tags", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -78,12 +78,12 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, false, - `Reverse the results.`, + `Reverse the results`, ) flagSet.StringVar( &f.Format, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatecreate/templatecreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatecreate/templatecreate.go index ea3587261f..c2c9300a5f 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatecreate/templatecreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatecreate/templatecreate.go @@ -54,7 +54,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a new Buf template.", + Short: "Create a Buf template", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -81,14 +81,14 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Config, configFlagName, "", - `The template file or data to use for configuration. Must be in either YAML or JSON format.`, + `The template file or data to use for configuration. Must be in either YAML or JSON format`, ) _ = cobra.MarkFlagRequired(flagSet, configFlagName) flagSet.StringVar( &f.Visibility, visibilityFlagName, "", - fmt.Sprintf(`The template's visibility setting. Must be one of %s.`, stringutil.SliceToString(allVisibiltyStrings)), + fmt.Sprintf(`The template's visibility setting. 
Must be one of %s`, stringutil.SliceToString(allVisibiltyStrings)), ) _ = cobra.MarkFlagRequired(flagSet, visibilityFlagName) flagSet.StringVar( diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedelete/templatedelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedelete/templatedelete.go index 867feeeb9d..dcb2a01f7e 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedelete/templatedelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedelete/templatedelete.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Delete a template by name.", + Short: "Delete a template", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -66,7 +66,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Force, forceFlagName, false, - "Force deletion without confirming. Use with caution.", + "Force deletion without confirming. Use with caution", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedeprecate/templatedeprecate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedeprecate/templatedeprecate.go index 8d859cd416..9c85f5aaf1 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedeprecate/templatedeprecate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatedeprecate/templatedeprecate.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Deprecate a template by name.", + Short: "Deprecate a template", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -66,7 +66,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Message, messageFlagName, "", - `The message to display with deprecation warnings.`, + `The message to display with deprecation warnings`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatelist/templatelist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatelist/templatelist.go index 10e43b4c16..d9ae3dcddf 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatelist/templatelist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templatelist/templatelist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List templates on the specified remote.", + Short: "List templates on the specified BSR", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -78,7 +78,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. 
If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateundeprecate/templateundeprecate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateundeprecate/templateundeprecate.go index fcf389a6f2..09b7e8d7bd 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateundeprecate/templateundeprecate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateundeprecate/templateundeprecate.go @@ -35,7 +35,7 @@ func NewCommand( ) *appcmd.Command { return &appcmd.Command{ Use: name + " ", - Short: "Undeprecate a template by name.", + Short: "Undeprecate a template", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc(run, bufcli.NewErrorInterceptor()), } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversioncreate/templateversioncreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversioncreate/templateversioncreate.go index c09478879a..d855d47ad7 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversioncreate/templateversioncreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversioncreate/templateversioncreate.go @@ -45,7 +45,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Create a new template version.", + Short: "Create a new template version", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -72,7 +72,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Config, configFlagName, "", - "The template file or data to use for configuration. Must be in either YAML or JSON format.", + "The template file or data to use for configuration. Must be in either YAML or JSON format", ) _ = cobra.MarkFlagRequired(flagSet, configFlagName) flagSet.StringVar( diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversionlist/templateversionlist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversionlist/templateversionlist.go index 4b6dcedbf5..993cea13e1 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversionlist/templateversionlist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/template/templateversion/templateversionlist/templateversionlist.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List versions for the specified template.", + Short: "List versions for the specified template", Args: cobra.ExactArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -78,7 +78,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { flagSet.StringVar(&f.PageToken, pageTokenFlagName, "", - `The page token. If more results are available, a "next_page" key is present in the --format=json output.`, + `The page token. 
If more results are available, a "next_page" key is present in the --format=json output`, ) flagSet.BoolVar(&f.Reverse, reverseFlagName, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookcreate/webhookcreate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookcreate/webhookcreate.go index fcc198d507..2ec27d966c 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookcreate/webhookcreate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookcreate/webhookcreate.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name, - Short: "Create a repository webhook.", + Short: "Create a repository webhook", Args: cobra.ExactArgs(0), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -75,35 +75,35 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.WebhookEvent, webhookEventFlagName, "", - "The event type to create a webhook for. The proto enum string value is used for this input (e.g. 'WEBHOOK_EVENT_REPOSITORY_PUSH').", + "The event type to create a webhook for. The proto enum string value is used for this input (e.g. 'WEBHOOK_EVENT_REPOSITORY_PUSH')", ) _ = cobra.MarkFlagRequired(flagSet, webhookEventFlagName) flagSet.StringVar( &f.OwnerName, ownerFlagName, "", - `The owner name of the repository to create a webhook for.`, + `The owner name of the repository to create a webhook for`, ) _ = cobra.MarkFlagRequired(flagSet, ownerFlagName) flagSet.StringVar( &f.RepositoryName, repositoryFlagName, "", - "The repository name to create a webhook for.", + "The repository name to create a webhook for", ) _ = cobra.MarkFlagRequired(flagSet, repositoryFlagName) flagSet.StringVar( &f.CallbackURL, callbackURLFlagName, "", - "The url for the webhook to callback to on a given event.", + "The url for the webhook to callback to on a given event", ) _ = cobra.MarkFlagRequired(flagSet, callbackURLFlagName) flagSet.StringVar( &f.Remote, remoteFlagName, "", - "The remote of the repository the created webhook will belong to.", + "The remote of the repository the created webhook will belong to", ) _ = cobra.MarkFlagRequired(flagSet, remoteFlagName) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookdelete/webhookdelete.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookdelete/webhookdelete.go index caebab66e4..2858775516 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookdelete/webhookdelete.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhookdelete/webhookdelete.go @@ -41,7 +41,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name, - Short: "Delete a repository webhook.", + Short: "Delete a repository webhook", Args: cobra.ExactArgs(0), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -67,14 +67,14 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.WebhookID, webhookIDFlagName, "", - "The webhook ID to delete.", + "The webhook ID to delete", ) _ = cobra.MarkFlagRequired(flagSet, webhookIDFlagName) flagSet.StringVar( &f.Remote, remoteFlagName, "", - "The remote of the repository the webhook ID belongs to.", + "The remote of the repository the webhook ID belongs to", ) _ = cobra.MarkFlagRequired(flagSet, remoteFlagName) } 
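The hunks in these vendored files all touch the same cobra/pflag wiring: each buf sub-command registers its flags in a Bind method, marks the mandatory ones with cobra.MarkFlagRequired, and the upstream style change simply drops the trailing period from every Short description and flag usage string. A minimal, self-contained sketch of that pattern, assuming only github.com/spf13/cobra and github.com/spf13/pflag (illustrative of the pattern, not buf's actual code):

    package main

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    	"github.com/spf13/pflag"
    )

    type flags struct {
    	Remote string
    }

    // bind registers the command's flags, mirroring the Bind methods in the
    // hunks above: usage strings carry no trailing period, and mandatory
    // flags are marked via cobra.MarkFlagRequired.
    func bind(flagSet *pflag.FlagSet, f *flags) {
    	flagSet.StringVar(
    		&f.Remote,
    		"remote",
    		"",
    		"The remote of the repository the webhook ID belongs to",
    	)
    	_ = cobra.MarkFlagRequired(flagSet, "remote")
    }

    func main() {
    	f := &flags{}
    	cmd := &cobra.Command{
    		Use:   "webhook-delete",
    		Short: "Delete a repository webhook", // Short text, period-free per the new convention
    		RunE: func(cmd *cobra.Command, args []string) error {
    			fmt.Println("remote:", f.Remote)
    			return nil
    		},
    	}
    	bind(cmd.Flags(), f)
    	_ = cmd.Execute()
    }

With this wiring, cobra rejects any invocation that omits --remote before RunE ever executes, which is the behavior the MarkFlagRequired calls in these hunks rely on.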
diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhooklist/webhooklist.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhooklist/webhooklist.go index a653eccd90..5a8b465b2f 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhooklist/webhooklist.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/registry/webhook/webhooklist/webhooklist.go @@ -43,7 +43,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name, - Short: "List repository webhooks.", + Short: "List repository webhooks", Args: cobra.ExactArgs(0), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -70,7 +70,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.OwnerName, ownerFlagName, "", - `The owner name of the repository to list webhooks for.`, + `The owner name of the repository to list webhooks for`, ) _ = cobra.MarkFlagRequired(flagSet, ownerFlagName) flagSet.StringVar( @@ -84,7 +84,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Remote, remoteFlagName, "", - "The remote of the owner and repository to list webhooks for.", + "The remote of the owner and repository to list webhooks for", ) _ = cobra.MarkFlagRequired(flagSet, remoteFlagName) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/studioagent/studioagent.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/studioagent/studioagent.go index 3becba367e..cdc435c881 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/studioagent/studioagent.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/beta/studioagent/studioagent.go @@ -52,7 +52,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name, - Short: "Run an HTTP(S) server as the Studio agent.", + Short: "Run an HTTP(S) server as the Studio agent", Args: cobra.ExactArgs(0), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -86,61 +86,61 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.BindAddress, bindFlagName, "127.0.0.1", - "The address to be exposed to accept HTTP requests.", + "The address to be exposed to accept HTTP requests", ) flagSet.StringVar( &f.Port, portFlagName, "8080", - "The port to be exposed to accept HTTP requests.", + "The port to be exposed to accept HTTP requests", ) flagSet.StringVar( &f.Origin, originFlagName, "https://studio.buf.build", - "The allowed origin for CORS options.", + "The allowed origin for CORS options", ) flagSet.StringSliceVar( &f.DisallowedHeaders, disallowedHeadersFlagName, nil, - `The header names that are disallowed by this agent. When the agent receives an enveloped request with these headers set, it will return an error rather than forward the request to the target server. Multiple headers are appended if specified multiple times.`, + `The header names that are disallowed by this agent. When the agent receives an enveloped request with these headers set, it will return an error rather than forward the request to the target server. Multiple headers are appended if specified multiple times`, ) flagSet.StringToStringVar( &f.ForwardHeaders, forwardHeadersFlagName, nil, - `The headers to be forwarded via the agent to the target server. Must be an equals sign separated key-value pair (like --forward-header=fromHeader1=toHeader1). 
Multiple header pairs are appended if specified multiple times.`, + `The headers to be forwarded via the agent to the target server. Must be an equals sign separated key-value pair (like --forward-header=fromHeader1=toHeader1). Multiple header pairs are appended if specified multiple times`, ) flagSet.StringVar( &f.CACert, caCertFlagName, "", - "The CA cert to be used in the client and server TLS configuration.", + "The CA cert to be used in the client and server TLS configuration", ) flagSet.StringVar( &f.ClientCert, clientCertFlagName, "", - "The cert to be used in the client TLS configuration.", + "The cert to be used in the client TLS configuration", ) flagSet.StringVar( &f.ClientKey, clientKeyFlagName, "", - "The key to be used in the client TLS configuration.", + "The key to be used in the client TLS configuration", ) flagSet.StringVar( &f.ServerCert, serverCertFlagName, "", - "The cert to be used in the server TLS configuration.", + "The cert to be used in the server TLS configuration", ) flagSet.StringVar( &f.ServerKey, serverKeyFlagName, "", - "The key to be used in the server TLS configuration.", + "The key to be used in the server TLS configuration", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/breaking/breaking.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/breaking/breaking.go index 78aa9343f5..d0d694f75d 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/breaking/breaking.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/breaking/breaking.go @@ -53,9 +53,10 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " --against ", - Short: "Verify that the input location has no breaking changes compared to the against location.", - Long: bufcli.GetInputLong(`the source, module, or image to check for breaking changes`), - Args: cobra.MaximumNArgs(1), + Short: "Verify no breaking changes have been made", + Long: `buf breaking makes sure that the location has no breaking changes compared to the location` + + bufcli.GetInputLong(`the source, module, or image to check for breaking changes`), + Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { return run(ctx, container, flags) @@ -94,7 +95,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors or check violations printed to stdout. Must be one of %s.", + "The format for build errors or check violations printed to stdout. Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -109,9 +110,9 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { limitToInputFilesFlagName, false, fmt.Sprintf( - `Only run breaking checks against the files in the input. -When set, the against input contains only the files in the input. -Overrides --%s.`, + `Only run breaking checks against the files in the input +When set, the against input contains only the files in the input +Overrides --%s`, pathsFlagName, ), ) @@ -119,14 +120,14 @@ Overrides --%s.`, &f.Config, configFlagName, "", - `The file or data to use for configuration.`, + `The file or data to use for configuration`, ) flagSet.StringVar( &f.Against, againstFlagName, "", fmt.Sprintf( - `Required. The source, module, or image to check against. Must be one of format %s.`, + `Required. The source, module, or image to check against. 
Must be one of format %s`, buffetch.AllFormatsString, ), ) @@ -134,7 +135,7 @@ Overrides --%s.`, &f.AgainstConfig, againstConfigFlagName, "", - `The file or data to use to configure the against source, module, or image.`, + `The file or data to use to configure the against source, module, or image`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/convert/convert.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/convert/convert.go index c9f6c956d0..65c484a39c 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/convert/convert.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/convert/convert.go @@ -50,36 +50,33 @@ func NewCommand( Long: ` Use an input proto to interpret a proto/json message and convert it to a different format. -The simplest form is: +Examples: -$ buf convert --type= --from= --to= + $ buf convert --type= --from= --to= - is the same input as any other buf command. -It can be a local .proto file, binary output of "buf build", bsr module or local buf module. -e.g. -$ buf convert example.proto --type=Foo.proto --from=payload.json --to=output.bin +The can be a local .proto file, binary output of "buf build", bsr module or local buf module: -# Other examples + $ buf convert example.proto --type=Foo.proto --from=payload.json --to=output.bin -# All of , "--from" and "to" accept formatting options +All of , "--from" and "to" accept formatting options: -$ buf convert example.proto#format=bin --type=buf.Foo --from=payload#format=json --to=out#format=json + $ buf convert example.proto#format=bin --type=buf.Foo --from=payload#format=json --to=out#format=json -# Both and "--from" accept stdin redirecting +Both and "--from" accept stdin redirecting: -$ buf convert <(buf build -o -)#format=bin --type=foo.Bar --from=<(echo "{\"one\":\"55\"}")#format=json + $ buf convert <(buf build -o -)#format=bin --type=foo.Bar --from=<(echo "{\"one\":\"55\"}")#format=json -# Redirect from stdin to --from +Redirect from stdin to --from: -$ echo "{\"one\":\"55\"}" | buf convert buf.proto --type buf.Foo --from -#format=json + $ echo "{\"one\":\"55\"}" | buf convert buf.proto --type buf.Foo --from -#format=json -# Redirect from stdin to +Redirect from stdin to : -$ buf build -o - | buf convert -#format=bin --type buf.Foo --from=payload.json + $ buf build -o - | buf convert -#format=bin --type buf.Foo --from=payload.json -# Use a module on the bsr +Use a module on the bsr: -buf convert buf.build// --type buf.Foo --from=payload.json + $ buf convert --type buf.Foo --from=payload.json `, Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( @@ -113,7 +110,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors printed to stderr. Must be one of %s.", + "The format for build errors printed to stderr. Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -128,7 +125,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { fromFlagName, "-", fmt.Sprintf( - `The location of the payload to be converted. Supported formats are %s.`, + `The location of the payload to be converted. Supported formats are %s`, bufconvert.MessageEncodingFormatsString, ), ) @@ -137,7 +134,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { outputFlagName, "-", fmt.Sprintf( - `The output location of the conversion. Supported formats are %s.`, + `The output location of the conversion. 
Supported formats are %s`, bufconvert.MessageEncodingFormatsString, ), ) diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/curl/curl.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/curl/curl.go index c21372b896..917ad09afe 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/curl/curl.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/curl/curl.go @@ -86,7 +86,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Invoke an RPC endpoint, a la 'cURL'.", + Short: "Invoke an RPC endpoint, a la 'cURL'", Long: `This command helps you invoke HTTP RPC endpoints on a server that uses gRPC or Connect. By default, server reflection is used, unless the --reflect flag is set to false. Without server @@ -126,34 +126,34 @@ Examples: Issue a unary RPC to a plain-text (i.e. "h2c") gRPC server, where the schema for the service is in a Buf module in the current directory, using an empty request message: - buf curl --schema . --protocol grpc --http2-prior-knowledge \ - http://localhost:20202/foo.bar.v1.FooService/DoSomething + $ buf curl --schema . --protocol grpc --http2-prior-knowledge \ + http://localhost:20202/foo.bar.v1.FooService/DoSomething Issue an RPC to a Connect server, where the schema comes from the Buf Schema Registry, using a request that is defined as a command-line argument: - buf curl --schema buf.build/bufbuild/eliza \ - --data '{"name": "Bob Loblaw"}' \ - https://demo.connect.build/buf.connect.demo.eliza.v1.ElizaService/Introduce + $ buf curl --schema buf.build/bufbuild/eliza \ + --data '{"name": "Bob Loblaw"}' \ + https://demo.connect.build/buf.connect.demo.eliza.v1.ElizaService/Introduce Issue a unary RPC to a server that supports reflection, with verbose output: - buf curl --data '{"sentence": "I am not feeling well."}' -v \ - https://demo.connect.build/buf.connect.demo.eliza.v1.ElizaService/Say + $ buf curl --data '{"sentence": "I am not feeling well."}' -v \ + https://demo.connect.build/buf.connect.demo.eliza.v1.ElizaService/Say Issue a client-streaming RPC to a gRPC-web server that supports reflection, where custom headers and request data are both in a heredoc: - buf curl --data @- --header @- --protocol grpcweb \ - https://demo.connect.build/buf.connect.demo.eliza.v1.ElizaService/Converse \ - < ar other buf sub-commands such as build and generate. It can indicate a directory, a file, a remote module in the Buf Schema Registry, or even standard in ("-") for feeding an image or file descriptor set to the command in a shell pipeline. -Setting this flags implies --%s=false.`, +Setting this flags implies --%s=false`, reflectFlagName, ), ) @@ -236,7 +236,7 @@ Setting this flags implies --%s=false.`, &f.Reflect, reflectFlagName, true, - `If true, use server reflection to determine the schema.`, + `If true, use server reflection to determine the schema`, ) flagSet.StringSliceVar( &f.ReflectHeaders, @@ -250,7 +250,7 @@ flags) should also be included with reflection requests. A special value of '@
<path>' means to read headers from the file at <path>
. If the path is "-" then headers are read from stdin. It is not allowed to indicate a file with the same path as used with the request data flag (--data or -d). Furthermore, it is not allowed to indicate stdin -if the schema is expected to be provided via stdin as a file descriptor set or image.`, +if the schema is expected to be provided via stdin as a file descriptor set or image`, ) flagSet.StringVar( &f.ReflectProtocol, @@ -264,30 +264,30 @@ and "grpc-v1alpha" is used if it doesn't work. If newer reflection protocols are they may be preferred in the absence of this flag being explicitly set to a specific protocol. The valid values for this flag are "grpc-v1" and "grpc-v1alpha". These correspond to services named "grpc.reflection.v1.ServerReflection" and "grpc.reflection.v1alpha.ServerReflection" -respectively.`, +respectively`, ) flagSet.StringVar( &f.Protocol, protocolFlagName, connect.ProtocolConnect, - `The RPC protocol to use. This can be one of "grpc", "grpcweb", or "connect".`, + `The RPC protocol to use. This can be one of "grpc", "grpcweb", or "connect"`, ) flagSet.StringVar( &f.UnixSocket, unixSocketFlagName, "", `The path to a unix socket that will be used instead of opening a TCP socket to the host -and port indicated in the URL.`, +and port indicated in the URL`, ) flagSet.BoolVar( &f.HTTP2PriorKnowledge, http2PriorKnowledgeFlagName, false, - `This flag can be used with URLs that use the http scheme (as opposed to http) to indicate + `This flag can be used with URLs that use the http scheme (as opposed to https) to indicate that HTTP/2 should be used. Without this, HTTP 1.1 will be used with URLs with an http scheme. For https scheme, HTTP/2 will be negotiate during the TLS handshake if the server -supports it (otherwise HTTP 1.1 is used).`, +supports it (otherwise HTTP 1.1 is used)`, ) flagSet.BoolVar( @@ -295,20 +295,20 @@ supports it (otherwise HTTP 1.1 is used).`, noKeepAliveFlagName, false, `By default, connections are created using TCP keepalive. If this flag is present, they -will be disabled.`, +will be disabled`, ) flagSet.Float64Var( &f.KeepAliveTimeSeconds, keepAliveFlagName, 60, - `The duration, in seconds, between TCP keepalive transmissions.`, + `The duration, in seconds, between TCP keepalive transmissions`, ) flagSet.Float64Var( &f.ConnectTimeoutSeconds, connectTimeoutFlagName, 0, `The time limit, in seconds, for a connection to be established with the server. There is -no limit if this flag is not present.`, +no limit if this flag is not present`, ) flagSet.StringVar( @@ -318,7 +318,7 @@ no limit if this flag is not present.`, `Path to a PEM-encoded X509 private key file, for using client certificates with TLS. This option is only valid when the URL uses the https scheme. A --cert flag must also be present to provide tha certificate and public key that corresponds to the given -private key.`, +private key`, ) flagSet.StringVarP( &f.Cert, @@ -327,7 +327,7 @@ private key.`, "", `Path to a PEM-encoded X509 certificate file, for using client certificates with TLS. This option is only valid when the URL uses the https scheme. A --key flag must also be -present to provide tha private key that corresponds to the given certificate.`, +present to provide tha private key that corresponds to the given certificate`, ) flagSet.StringVar( &f.CACert, @@ -336,7 +336,7 @@ present to provide tha private key that corresponds to the given certificate.`, `Path to a PEM-encoded X509 certificate pool file that contains the set of trusted certificate authorities/issuers. 
If omitted, the system's default set of trusted certificates are used to verify the server's certificate. This option is only valid -when the URL uses the https scheme. It is not applicable if --insecure flag is used.`, +when the URL uses the https scheme. It is not applicable if --insecure flag is used`, ) flagSet.BoolVarP( &f.Insecure, @@ -345,7 +345,7 @@ when the URL uses the https scheme. It is not applicable if --insecure flag is u false, `If set, the TLS connection will be insecure and the server's certificate will NOT be verified. This is generally discouraged. This option is only valid when the URL uses -the https scheme.`, +the https scheme`, ) flagSet.StringVar( &f.ServerName, @@ -353,7 +353,7 @@ the https scheme.`, "", `The server name to use in TLS handshakes (for SNI) if the URL scheme is https. If not specified, the default is the origin host in the URL or the value in a "Host" header if -one is provided.`, +one is provided`, ) flagSet.StringVarP( @@ -361,7 +361,7 @@ one is provided.`, userAgentFlagName, "A", "", - `The user agent string to send.`, + `The user agent string to send`, ) flagSet.StringSliceVarP( &f.Headers, @@ -374,7 +374,7 @@ A special value of '@' means to read headers from the file at . If t is "-" then headers are read from stdin. If the same file is indicated as used with the request data flag (--data or -d), the file must contain all headers, then a blank line, and then the request body. It is not allowed to indicate stdin if the schema is expected -to be provided via stdin as a file descriptor set or image.`, +to be provided via stdin as a file descriptor set or image`, ) flagSet.StringVarP( &f.Data, @@ -387,14 +387,14 @@ message. For unary RPCs, there should be exactly one JSON document. A special va request data is read from stdin. If the same file is indicated as used with the request headers flags (--header or -H), the file must contain all headers, then a blank line, and then the request body. It is not allowed to indicate stdin if the schema is expected to be -provided via stdin as a file descriptor set or image.`, +provided via stdin as a file descriptor set or image`, ) flagSet.StringVarP( &f.Output, outputFlagName, "o", "", - `Path to output file to create with response data. If absent, response is printed to stdout.`, + `Path to output file to create with response data. If absent, response is printed to stdout`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/export/export.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/export/export.go index b737d9d69d..2d0e9d74cb 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/export/export.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/export/export.go @@ -54,30 +54,31 @@ func NewCommand( ) *appcmd.Command { flags := newFlags() return &appcmd.Command{ - Use: name + " ", - Short: "Export the files from the input location to an output location.", - Long: bufcli.GetInputLong(`the source or module to export`) + ` + Use: name + " ", + Short: "Export proto files from one location to another", + Long: bufcli.GetSourceOrModuleLong(`the source or module to export`) + ` Examples: -$ buf export --output= +Export proto files in to an output directory. -input can be of the format [dir,git,mod,protofile,tar,targz,zip]. + $ buf export --output= -output will be a directory with all of the .proto files in the . +Export current directory to another local directory. -# Export current directory to another local directory. -$ buf export . 
--output= + $ buf export . --output= -# Export the latest remote module to a local directory. -$ buf export buf.build// --output= +Export the latest remote module to a local directory. -# Export a specific version of a remote module to a local directory. -$ buf export buf.build//: --output= + $ buf export --output= -# Export a git repo to a local directory. -$ buf export https:////.git --output= +Export a specific version of a remote module to a local directory. + $ buf export --output= + +Export a git repo to a local directory. + + $ buf export https://github.com/owner/repository.git --output= `, Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( @@ -117,14 +118,14 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { outputFlagName, outputFlagShortName, "", - `The output directory for exported files.`, + `The output directory for exported files`, ) _ = cobra.MarkFlagRequired(flagSet, outputFlagName) flagSet.StringVar( &f.Config, configFlagName, "", - `The file or data to use for configuration.`, + `The file or data to use for configuration`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/format/format.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/format/format.go index 9ebf41a703..8848b64f03 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/format/format.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/format/format.go @@ -64,89 +64,100 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Format all Protobuf files from the specified input and output the result.", + Short: "Format Protobuf files", Long: ` -By default, the input is the current directory and the formatted content is written to stdout. For example, +By default, the source is the current directory and the formatted content is written to stdout. -# Write the current directory's formatted content to stdout -$ buf format +Examples: -Rewrite the file(s) in-place with -w. For example, +Write the current directory's formatted content to stdout: -# Rewrite the files defined in the current directory in-place -$ buf format -w + $ buf format -Most people will want to use 'buf format -w'. +Most people will want to rewrite the files defined in the current directory in-place with -w: -Display a diff between the original and formatted content with -d. 
For example, + $ buf format -w -# Write a diff instead of the formatted file -$ buf format simple/simple.proto -d -diff -u simple/simple.proto.orig simple/simple.proto ---- simple/simple.proto.orig 2022-03-24 09:44:10.000000000 -0700 -+++ simple/simple.proto 2022-03-24 09:44:10.000000000 -0700 -@@ -2,8 +2,7 @@ +Display a diff between the original and formatted content with -d +Write a diff instead of the formatted file: + + $ buf format simple/simple.proto -d + + $ diff -u simple/simple.proto.orig simple/simple.proto + --- simple/simple.proto.orig 2022-03-24 09:44:10.000000000 -0700 + +++ simple/simple.proto 2022-03-24 09:44:10.000000000 -0700 + @@ -2,8 +2,7 @@ + + package simple; + + - + message Object { + - string key = 1; + - bytes value = 2; + + string key = 1; + + bytes value = 2; + } - package simple; +Use the --exit-code flag to exit with a non-zero exit code if there is a diff: -- - message Object { -- string key = 1; -- bytes value = 2; -+ string key = 1; -+ bytes value = 2; - } + $ buf format --exit-code + $ buf format -w --exit-code + $ buf format -d --exit-code -You can also use the --exit-code flag to exit with a non-zero exit code if there is a diff: +Format a file, directory, or module reference by specifying a source e.g. +Write the formatted file to stdout: + + $ buf format simple/simple.proto + + syntax = "proto3"; + + package simple; + + message Object { + string key = 1; + bytes value = 2; + } -$ buf format --exit-code -$ buf format -w --exit-code -$ buf format -d --exit-code +Write the formatted directory to stdout: -Format a file, directory, or module reference by specifying an input. For example, + $ buf format simple + ... -# Write the formatted file to stdout -$ buf format simple/simple.proto -syntax = "proto3"; +Write the formatted module reference to stdout: -package simple; + $ buf format buf.build/acme/petapis + ... -message Object { - string key = 1; - bytes value = 2; -} +Write the result to a specified output file or directory with -o e.g. + +Write the formatted file to another file: + + $ buf format simple/simple.proto -o simple/simple.formatted.proto + +Write the formatted directory to another directory, creating it if it doesn't exist: + + $ buf format proto -o formatted -# Write the formatted directory to stdout -$ buf format simple -... +This also works with module references: -# Write the formatted module reference to stdout -$ buf format buf.build/acme/petapis -... + $ buf format buf.build/acme/weather -o formatted -Write the result to a specified output file or directory with -o. For example, +Rewrite the file(s) in-place with -w. e.g. -# Write the formatted file to another file -$ buf format simple/simple.proto -o simple/simple.formatted.proto +Rewrite a single file in-place: -# Write the formatted directory to another directory, creating it if it doesn't exist -$ buf format proto -o formatted + $ buf format simple.proto -w -# This also works with module references -$ buf format buf.build/acme/weather -o formatted +Rewrite an entire directory in-place: -Rewrite the file(s) in-place with -w. For example, + $ buf format proto -w -# Rewrite a single file in-place -$ buf format simple.proto -w +Write a diff and rewrite the file(s) in-place: -# Rewrite an entire directory in-place -$ buf format proto -w + $ buf format simple -d -w -# Write a diff and rewrite the file(s) in-place -$ buf format simple -d -w -diff -u simple/simple.proto.orig simple/simple.proto -... + $ diff -u simple/simple.proto.orig simple/simple.proto + ... 
The -w and -o flags cannot be used together in a single invocation. `, @@ -189,27 +200,27 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { diffFlagName, diffFlagShortName, false, - "Display diffs instead of rewriting files.", + "Display diffs instead of rewriting files", ) flagSet.BoolVar( &f.ExitCode, exitCodeFlagName, false, - "Exit with a non-zero exit code if files were not already formatted.", + "Exit with a non-zero exit code if files were not already formatted", ) flagSet.BoolVarP( &f.Write, writeFlagName, writeFlagShortName, false, - "Rewrite files in-place.", + "Rewrite files in-place", ) flagSet.StringVar( &f.ErrorFormat, errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors printed to stderr. Must be one of %s.", + "The format for build errors printed to stderr. Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -219,7 +230,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { outputFlagShortName, "-", fmt.Sprintf( - `The output location for the formatted files. Must be one of format %s. If omitted, the result is written to stdout.`, + `The output location for the formatted files. Must be one of format %s. If omitted, the result is written to stdout`, buffetch.SourceFormatsString, ), ) @@ -227,7 +238,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Config, configFlagName, "", - `The file or data to use for configuration.`, + `The file or data to use for configuration`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/generate/generate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/generate/generate.go index 67a9095634..2152293e76 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/generate/generate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/generate/generate.go @@ -44,6 +44,8 @@ const ( includeWKTFlagName = "include-wkt" excludePathsFlagName = "exclude-path" disableSymlinksFlagName = "disable-symlinks" + typeFlagName = "type" + typeDeprecatedFlagName = "include-types" ) // NewCommand returns a new Command. @@ -54,74 +56,76 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Generate stubs for protoc plugins using a template.", + Short: "Generate code with protoc plugins", Long: `This command uses a template file of the shape: -# The version of the generation template. -# Required. -# The valid values are v1beta1, v1. -version: v1 -# The plugins to run. "plugin" is required. -plugins: - # The name of the plugin. - # By default, buf generate will look for a binary named protoc-gen-NAME on your $PATH. - # Alternatively, use a remote plugin: - # plugin: buf.build/protocolbuffers/go:v1.28.1 - - plugin: go - # The the relative output directory. + # buf.gen.yaml + # The version of the generation template. # Required. - out: gen/go - # Any options to provide to the plugin. - # This can be either a single string or a list of strings. - # Optional. - opt: paths=source_relative - # The custom path to the plugin binary, if not protoc-gen-NAME on your $PATH. - # Optional, and exclusive with "remote". - path: custom-gen-go - # The generation strategy to use. There are two options: - # - # 1. "directory" - # - # This will result in buf splitting the input files by directory, and making separate plugin - # invocations in parallel. This is roughly the concurrent equivalent of: - # - # for dir in $(find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq); do - # protoc -I . 
$(find "${dir}" -name '*.proto') - # done - # - # Almost every Protobuf plugin either requires this, or works with this, - # and this is the recommended and default value. - # - # 2. "all" - # - # This will result in buf making a single plugin invocation with all input files. - # This is roughly the equivalent of: - # - # protoc -I . $(find . -name '*.proto') - # - # This is needed for certain plugins that expect all files to be given at once. - # - # If omitted, "directory" is used. Most users should not need to set this option. - # Optional. - strategy: directory - - plugin: java - out: gen/java - # Use the plugin hosted at buf.build/protocolbuffers/python at version v21.9. - # If version is omitted, uses the latest version of the plugin. - - plugin: buf.build/protocolbuffers/python:v21.9 - out: gen/python + # The valid values are v1beta1, v1. + version: v1 + # The plugins to run. "plugin" is required. + plugins: + # The name of the plugin. + # By default, buf generate will look for a binary named protoc-gen-NAME on your $PATH. + # Alternatively, use a remote plugin: + # plugin: buf.build/protocolbuffers/go:v1.28.1 + - plugin: go + # The the relative output directory. + # Required. + out: gen/go + # Any options to provide to the plugin. + # This can be either a single string or a list of strings. + # Optional. + opt: paths=source_relative + # The custom path to the plugin binary, if not protoc-gen-NAME on your $PATH. + # Optional, and exclusive with "remote". + path: custom-gen-go + # The generation strategy to use. There are two options: + # + # 1. "directory" + # + # This will result in buf splitting the input files by directory, and making separate plugin + # invocations in parallel. This is roughly the concurrent equivalent of: + # + # for dir in $(find . -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq); do + # protoc -I . $(find "${dir}" -name '*.proto') + # done + # + # Almost every Protobuf plugin either requires this, or works with this, + # and this is the recommended and default value. + # + # 2. "all" + # + # This will result in buf making a single plugin invocation with all input files. + # This is roughly the equivalent of: + # + # protoc -I . $(find . -name '*.proto') + # + # This is needed for certain plugins that expect all files to be given at once. + # + # If omitted, "directory" is used. Most users should not need to set this option. + # Optional. + strategy: directory + - plugin: java + out: gen/java + # Use the plugin hosted at buf.build/protocolbuffers/python at version v21.9. + # If version is omitted, uses the latest version of the plugin. + - plugin: buf.build/protocolbuffers/python:v21.9 + out: gen/python As an example, here's a typical "buf.gen.yaml" go and grpc, assuming "protoc-gen-go" and "protoc-gen-go-grpc" are on your "$PATH": -version: v1 -plugins: - - plugin: go - out: gen/go - opt: paths=source_relative - - plugin: go-grpc - out: gen/go - opt: paths=source_relative,require_unimplemented_servers=false + # buf.gen.yaml + version: v1 + plugins: + - plugin: go + out: gen/go + opt: paths=source_relative + - plugin: go-grpc + out: gen/go + opt: paths=source_relative,require_unimplemented_servers=false By default, buf generate will look for a file of this shape named "buf.gen.yaml" in your current directory. This can be thought of as a template @@ -130,36 +134,42 @@ for the set of plugins you want to invoke. The first argument is the source, module, or image to generate from. If no argument is specified, defaults to ".". 
-Call with: +Use buf.gen.yaml as template, current directory as input: -# uses buf.gen.yaml as template, current directory as input -$ buf generate + $ buf generate -# same as the defaults (template of "buf.gen.yaml", current directory as input) -$ buf generate --template buf.gen.yaml . +Same as the defaults (template of "buf.gen.yaml", current directory as input): -# --template also takes YAML or JSON data as input, so it can be used without a file -$ buf generate --template '{"version":"v1","plugins":[{"plugin":"go","out":"gen/go"}]}' + $ buf generate --template buf.gen.yaml . -# download the repository and generate code stubs per the bar.yaml template -$ buf generate --template bar.yaml https://github.com/foo/bar.git +The --template flag also takes YAML or JSON data as input, so it can be used without a file: -# generate to the bar/ directory, prepending bar/ to the out directives in the template -$ buf generate --template bar.yaml -o bar https://github.com/foo/bar.git + $ buf generate --template '{"version":"v1","plugins":[{"plugin":"go","out":"gen/go"}]}' -The paths in the template and the -o flag will be interpreted as relative to your +Download the repository and generate code stubs per the bar.yaml template: + + $ buf generate --template bar.yaml https://github.com/foo/bar.git + +Generate to the bar/ directory, prepending bar/ to the out directives in the template: + + $ buf generate --template bar.yaml -o bar https://github.com/foo/bar.git + +The paths in the template and the -o flag will be interpreted as relative to the current directory, so you can place your template files anywhere. -If you only want to generate stubs for a subset of your input, you can do so via the --path flag: +If you only want to generate stubs for a subset of your input, you can do so via the --path. e.g. + +Only generate for the files in the directories proto/foo and proto/bar: + + $ buf generate --path proto/foo --path proto/bar -# Only generate for the files in the directories proto/foo and proto/bar -$ buf generate --path proto/foo --path proto/bar +Only generate for the files proto/foo/foo.proto and proto/foo/bar.proto: -# Only generate for the files proto/foo/foo.proto and proto/foo/bar.proto -$ buf generate --path proto/foo/foo.proto --path proto/foo/bar.proto + $ buf generate --path proto/foo/foo.proto --path proto/foo/bar.proto -# Only generate for the files in the directory proto/foo on your GitHub repository -$ buf generate --template buf.gen.yaml https://github.com/foo/bar.git --path proto/foo +Only generate for the files in the directory proto/foo on your git repository: + + $ buf generate --template buf.gen.yaml https://github.com/foo/bar.git --path proto/foo Note that all paths must be contained within the same module. For example, if you have a module in "proto", you cannot specify "--path proto", however "--path proto/foo" is allowed @@ -193,7 +203,10 @@ type flags struct { IncludeWKT bool ExcludePaths []string DisableSymlinks bool - IncludeTypes []string + // We may be able to bind two flags to one string slice but I don't + // want to find out what will break if we do. + Types []string + TypesDeprecated []string // special InputHashtag string } @@ -211,14 +224,14 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.IncludeImports, includeImportsFlagName, false, - "Also generate all imports except for Well-Known Types.", + "Also generate all imports except for Well-Known Types", ) flagSet.BoolVar( &f.IncludeWKT, includeWKTFlagName, false, fmt.Sprintf( - "Also generate Well-Known Types. 
Cannot be set without --%s.", + "Also generate Well-Known Types. Cannot be set without --%s", includeImportsFlagName, ), ) @@ -226,21 +239,21 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Template, templateFlagName, "", - `The generation template file or data to use. Must be in either YAML or JSON format.`, + `The generation template file or data to use. Must be in either YAML or JSON format`, ) flagSet.StringVarP( &f.BaseOutDirPath, baseOutDirPathFlagName, baseOutDirPathFlagShortName, ".", - `The base directory to generate to. This is prepended to the out directories in the generation template.`, + `The base directory to generate to. This is prepended to the out directories in the generation template`, ) flagSet.StringVar( &f.ErrorFormat, errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors, printed to stderr. Must be one of %s.", + "The format for build errors, printed to stderr. Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -248,14 +261,22 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Config, configFlagName, "", - `The file or data to use for configuration.`, + `The file or data to use for configuration`, + ) + flagSet.StringSliceVar( + &f.Types, + typeFlagName, + nil, + "The types (message, enum, service) that should be included in this image. When specified, the resulting image will only include descriptors to describe the requested types. Flag usage overrides buf.gen.yaml", ) flagSet.StringSliceVar( - &f.IncludeTypes, - "include-types", + &f.TypesDeprecated, + typeDeprecatedFlagName, nil, "The types (message, enum, service) that should be included in this image. When specified, the resulting image will only include descriptors to describe the requested types. Flag usage overrides buf.gen.yaml", ) + _ = flagSet.MarkDeprecated(typeDeprecatedFlagName, fmt.Sprintf("Use --%s instead", typeFlagName)) + _ = flagSet.MarkHidden(typeDeprecatedFlagName) } func run( @@ -357,9 +378,9 @@ func run( ) } var includedTypes []string - if len(flags.IncludeTypes) > 0 { + if len(flags.Types) > 0 || len(flags.TypesDeprecated) > 0 { // command-line flags take precedence - includedTypes = flags.IncludeTypes + includedTypes = append(flags.Types, flags.TypesDeprecated...) } else if genConfig.TypesConfig != nil { includedTypes = genConfig.TypesConfig.Include } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lint/lint.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lint/lint.go index 863b1cffb8..44cb7da0d7 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lint/lint.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lint/lint.go @@ -48,7 +48,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Verify that the input location passes lint checks.", + Short: "Run linting on Protobuf files", Long: bufcli.GetInputLong(`the source, module, or Image to lint`), Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( @@ -85,7 +85,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors or check violations printed to stdout. Must be one of %s.", + "The format for build errors or check violations printed to stdout. 
Must be one of %s", stringutil.SliceToString(buflint.AllFormatStrings), ), ) @@ -93,7 +93,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Config, configFlagName, "", - `The file or data to use for configuration.`, + `The file or data to use for configuration`, ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lsfiles/lsfiles.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lsfiles/lsfiles.go index ad0c51ba39..03da728657 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lsfiles/lsfiles.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/lsfiles/lsfiles.go @@ -46,7 +46,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "List all Protobuf files for the input.", + Short: "List Protobuf files", Long: bufcli.GetInputLong(`the source, module, or image to list from`), Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( @@ -80,20 +80,20 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.AsImportPaths, asImportPathsFlagName, false, - "Strip local directory paths and print filepaths as they are imported.", + "Strip local directory paths and print filepaths as they are imported", ) flagSet.StringVar( &f.Config, configFlagName, "", - `The file or data to use for configuration.`, + `The file or data to use for configuration`, ) flagSet.StringVar( &f.ErrorFormat, errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors printed to stderr. Must be one of %s.", + "The format for build errors printed to stderr. Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -101,7 +101,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.IncludeImports, includeImportsFlagName, false, - "Include imports.", + "Include imports", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/internal/internal.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/internal/internal.go index 467b775578..98a001e662 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/internal/internal.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/internal/internal.go @@ -29,7 +29,7 @@ func BindLSRulesAll(flagSet *pflag.FlagSet, addr *bool, flagName string) { addr, flagName, false, - "List all rules and not just those currently configured.", + "List all rules and not just those currently configured", ) } @@ -40,7 +40,7 @@ func BindLSRulesConfig(flagSet *pflag.FlagSet, addr *string, flagName string, al flagName, "", fmt.Sprintf( - `The file or data to use for configuration. Ignored if --%s or --%s is specified.`, + `The file or data to use for configuration. Ignored if --%s or --%s is specified`, allFlagName, versionFlagName, ), @@ -54,7 +54,7 @@ func BindLSRulesFormat(flagSet *pflag.FlagSet, addr *string, flagName string) { flagName, "text", fmt.Sprintf( - "The format to print rules as. Must be one of %s.", + "The format to print rules as. Must be one of %s", stringutil.SliceToString(bufcheck.AllRuleFormatStrings), ), ) @@ -67,7 +67,7 @@ func BindLSRulesVersion(flagSet *pflag.FlagSet, addr *string, flagName string, a flagName, "", // do not set a default as we need to know if this is unset fmt.Sprintf( - "List all the rules for the given configuration version. Implies --%s. Must be one of %s.", + "List all the rules for the given configuration version. Implies --%s. 
Must be one of %s", allFlagName, stringutil.SliceToString(bufconfig.AllVersions), ), diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modclearcache/modclearcache.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modclearcache/modclearcache.go index 15a0e92e76..cd5a522b44 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modclearcache/modclearcache.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modclearcache/modclearcache.go @@ -38,7 +38,7 @@ func NewCommand( return &appcmd.Command{ Use: name, Aliases: aliases, - Short: "Clears the Buf module cache.", + Short: "Clear Buf module cache", Args: cobra.NoArgs, Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modinit/modinit.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modinit/modinit.go index b237caa497..447ebcad49 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modinit/modinit.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modinit/modinit.go @@ -22,6 +22,7 @@ import ( "github.com/bufbuild/buf/private/bufpkg/bufcheck/bufbreaking/bufbreakingconfig" "github.com/bufbuild/buf/private/bufpkg/bufcheck/buflint/buflintconfig" "github.com/bufbuild/buf/private/bufpkg/bufconfig" + "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref" "github.com/bufbuild/buf/private/pkg/app/appcmd" "github.com/bufbuild/buf/private/pkg/app/appflag" "github.com/bufbuild/buf/private/pkg/storage/storageos" @@ -43,9 +44,9 @@ func NewCommand( ) *appcmd.Command { flags := newFlags() return &appcmd.Command{ - Use: name, + Use: name + " [buf.build/owner/foobar]", Short: fmt.Sprintf("Initializes and writes a new %s configuration file.", bufconfig.ExternalConfigV1FilePath), - Args: cobra.NoArgs, + Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { return run(ctx, container, flags) @@ -74,20 +75,20 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.DocumentationComments, documentationCommentsFlagName, false, - "Write inline documentation in the form of comments in the resulting configuration file.", + "Write inline documentation in the form of comments in the resulting configuration file", ) flagSet.StringVarP( &f.OutDirPath, outDirPathFlagName, outDirPathFlagShortName, ".", - `The directory to write the configuration file to.`, + `The directory to write the configuration file to`, ) flagSet.BoolVar( &f.Uncomment, uncommentFlagName, false, - "Uncomment examples in the resulting configuration file.", + "Uncomment examples in the resulting configuration file", ) _ = flagSet.MarkHidden(uncommentFlagName) } @@ -116,6 +117,16 @@ func run( return appcmd.NewInvalidArgumentErrorf("%s already exists, not overwriting", existingConfigFilePath) } var writeConfigOptions []bufconfig.WriteConfigOption + if container.NumArgs() > 0 { + moduleIdentity, err := bufmoduleref.ModuleIdentityForString(container.Arg(0)) + if err != nil { + return err + } + writeConfigOptions = append( + writeConfigOptions, + bufconfig.WriteConfigWithModuleIdentity(moduleIdentity), + ) + } if flags.DocumentationComments { writeConfigOptions = append( writeConfigOptions, diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlsbreakingrules/modlsbreakingrules.go 
b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlsbreakingrules/modlsbreakingrules.go index 57bf9f2cb6..b6a4cb0d22 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlsbreakingrules/modlsbreakingrules.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlsbreakingrules/modlsbreakingrules.go @@ -44,7 +44,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name, - Short: "List breaking rules.", + Short: "List breaking rules", Args: cobra.NoArgs, Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlslintrules/modlslintrules.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlslintrules/modlslintrules.go index 645837b19c..6c76501bda 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlslintrules/modlslintrules.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modlslintrules/modlslintrules.go @@ -44,7 +44,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name, - Short: "List lint rules.", + Short: "List lint rules", Args: cobra.NoArgs, Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modopen/modopen.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modopen/modopen.go index 409a427904..058648d47a 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modopen/modopen.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modopen/modopen.go @@ -34,7 +34,7 @@ func NewCommand( ) *appcmd.Command { return &appcmd.Command{ Use: name + " ", - Short: "Open the module's homepage in a web browser.", + Short: "Open the module's homepage in a web browser", Long: `The first argument is the directory of the local module to open. If no argument is specified, defaults to "."`, Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modprune/modprune.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modprune/modprune.go index fe65ca8a6b..84f771f4e1 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modprune/modprune.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modprune/modprune.go @@ -41,8 +41,8 @@ func NewCommand( ) *appcmd.Command { return &appcmd.Command{ Use: name + " ", - Short: "Prunes unused dependencies from the " + buflock.ExternalConfigFilePath + " file.", - Long: `The first argument is the directory of the local module to prune. Defaults to "." if no argument is specified.`, + Short: "Prune unused dependencies from the" + buflock.ExternalConfigFilePath + " file", + Long: `The first argument is the directory of the local module to prune. Defaults to "." 
if no argument is specified`, Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modupdate/modupdate.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modupdate/modupdate.go index 06be3c85c1..b5bcdc823e 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modupdate/modupdate.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/mod/modupdate/modupdate.go @@ -50,11 +50,11 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Update a module's dependencies by updating the " + buflock.ExternalConfigFilePath + " file.", + Short: "Update a module's dependencies by updating the " + buflock.ExternalConfigFilePath + " file", Long: "Fetch the latest digests for the specified references in the config file, " + "and write them and their transitive dependencies to the " + buflock.ExternalConfigFilePath + - ` file. The first argument is the directory of the local module to update. Defaults to "." if no argument is specified.`, + ` file. The first argument is the directory of the local module to update. Defaults to "." if no argument is specified`, Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -79,7 +79,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Only, onlyFlagName, nil, - "The name of the dependency to update. When set, only this dependency is updated (along with any of its sub-dependencies). May be passed multiple times.", + "The name of the dependency to update. When set, only this dependency is updated (along with any of its sub-dependencies). May be passed multiple times", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/push/push.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/push/push.go index 4f3e8d9a50..fd475bb227 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/push/push.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/push/push.go @@ -21,13 +21,17 @@ import ( "github.com/bufbuild/buf/private/buf/bufcli" "github.com/bufbuild/buf/private/bufpkg/bufanalysis" + "github.com/bufbuild/buf/private/bufpkg/bufmanifest" "github.com/bufbuild/buf/private/bufpkg/bufmodule" + "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild" + "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref" "github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect" registryv1alpha1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1" "github.com/bufbuild/buf/private/pkg/app/appcmd" "github.com/bufbuild/buf/private/pkg/app/appflag" "github.com/bufbuild/buf/private/pkg/command" "github.com/bufbuild/buf/private/pkg/connectclient" + "github.com/bufbuild/buf/private/pkg/manifest" "github.com/bufbuild/buf/private/pkg/stringutil" "github.com/bufbuild/connect-go" "github.com/spf13/cobra" @@ -52,7 +56,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: "Push a module to a registry.", + Short: "Push a module to a registry", Long: bufcli.GetSourceLong(`the source to push`), Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( @@ -89,7 +93,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { tagFlagShortName, nil, fmt.Sprintf( - "Create a tag for the pushed commit. Multiple tags are created if specified multiple times. 
Cannot be used together with --%s.", + "Create a tag for the pushed commit. Multiple tags are created if specified multiple times. Cannot be used together with --%s", draftFlagName, ), ) @@ -98,7 +102,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { draftFlagName, "", fmt.Sprintf( - "Make the pushed commit a draft with the specified name. Cannot be used together with --%s (-%s).", + "Make the pushed commit a draft with the specified name. Cannot be used together with --%s (-%s)", tagFlagName, tagFlagShortName, ), @@ -108,7 +112,7 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { errorFormatFlagName, "text", fmt.Sprintf( - "The format for build errors printed to stderr. Must be one of %s.", + "The format for build errors printed to stderr. Must be one of %s", stringutil.SliceToString(bufanalysis.AllFormatStrings), ), ) @@ -155,24 +159,82 @@ func run( return err } moduleIdentity := sourceConfig.ModuleIdentity - module, err := bufcli.ReadModule( + builtModule, err := bufmodulebuild.BuildForBucket( ctx, - container.Logger(), sourceBucket, - sourceConfig, + sourceConfig.Build, ) if err != nil { return err } - protoModule, err := bufmodule.ModuleToProtoModule(ctx, module) + modulePin, err := push(ctx, container, moduleIdentity, builtModule, flags) if err != nil { + if connect.CodeOf(err) == connect.CodeAlreadyExists { + if _, err := container.Stderr().Write( + []byte("The latest commit has the same content; not creating a new commit.\n"), + ); err != nil { + return err + } + return nil + } return err } + if modulePin == nil { + return errors.New("Missing local module pin in the registry's response.") + } + if _, err := container.Stdout().Write([]byte(modulePin.Commit + "\n")); err != nil { + return err + } + return nil +} + +func push( + ctx context.Context, + container appflag.Container, + moduleIdentity bufmoduleref.ModuleIdentity, + builtModule *bufmodulebuild.BuiltModule, + flags *flags, +) (*registryv1alpha1.LocalModulePin, error) { clientConfig, err := bufcli.NewConnectClientConfig(container) if err != nil { - return err + return nil, err } service := connectclient.Make(clientConfig, moduleIdentity.Remote(), registryv1alpha1connect.NewPushServiceClient) + // Check if tamper proofing env var is enabled + tamperProofingEnabled, err := bufcli.IsBetaTamperProofingEnabled(container) + if err != nil { + return nil, err + } + if tamperProofingEnabled { + m, blobSet, err := manifest.NewFromBucket(ctx, builtModule.Bucket) + if err != nil { + return nil, err + } + bucketManifest, blobs, err := bufmanifest.ToProtoManifestAndBlobs(ctx, m, blobSet) + if err != nil { + return nil, err + } + resp, err := service.PushManifestAndBlobs( + ctx, + connect.NewRequest(®istryv1alpha1.PushManifestAndBlobsRequest{ + Owner: moduleIdentity.Owner(), + Repository: moduleIdentity.Repository(), + Manifest: bucketManifest, + Blobs: blobs, + Tags: flags.Tags, + DraftName: flags.Draft, + }), + ) + if err != nil { + return nil, err + } + return resp.Msg.LocalModulePin, nil + } + // Fall back to previous push call + protoModule, err := bufmodule.ModuleToProtoModule(ctx, builtModule.Module) + if err != nil { + return nil, err + } resp, err := service.Push( ctx, connect.NewRequest(®istryv1alpha1.PushRequest{ @@ -184,21 +246,7 @@ func run( }), ) if err != nil { - if connect.CodeOf(err) == connect.CodeAlreadyExists { - if _, err := container.Stderr().Write( - []byte("The latest commit has the same content; not creating a new commit.\n"), - ); err != nil { - return err - } - return nil - } - return err + return nil, err } - if 
resp.Msg.LocalModulePin == nil { - return errors.New("Missing local module pin in the registry's response.") - } - if _, err := container.Stdout().Write([]byte(resp.Msg.LocalModulePin.Commit + "\n")); err != nil { - return err - } - return nil + return resp.Msg.LocalModulePin, nil } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogin/registrylogin.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogin/registrylogin.go index 3fe094130b..80ee6b6428 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogin/registrylogin.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogin/registrylogin.go @@ -47,7 +47,7 @@ func NewCommand( flags := newFlags() return &appcmd.Command{ Use: name + " ", - Short: `Log in to the Buf Schema Registry.`, + Short: `Log in to the Buf Schema Registry`, Long: fmt.Sprintf(`This prompts for your BSR username and a BSR token and updates your %s file with these credentials. The argument will default to buf.build if not specified.`, netrc.Filename), Args: cobra.MaximumNArgs(1), @@ -75,13 +75,13 @@ func (f *flags) Bind(flagSet *pflag.FlagSet) { &f.Username, usernameFlagName, "", - "The username to use. This command prompts for a username by default.", + "The username to use. This command prompts for a username by default", ) flagSet.BoolVar( &f.TokenStdin, tokenStdinFlagName, false, - "Read the token from stdin. This command prompts for a token by default.", + "Read the token from stdin. This command prompts for a token by default", ) } diff --git a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogout/registrylogout.go b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogout/registrylogout.go index 7e1fea8bc3..1175343d7b 100644 --- a/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogout/registrylogout.go +++ b/vendor/github.com/bufbuild/buf/private/buf/cmd/buf/command/registry/registrylogout/registrylogout.go @@ -37,8 +37,8 @@ func NewCommand( // Not documenting the first arg (remote) as this is just for testing for now. // TODO: Update when we have self-hosted. 
Use: name, - Short: `Log out of the Buf Schema Registry.`, - Long: fmt.Sprintf(`This command removes any BSR credentials from your %s file.`, netrc.Filename), + Short: `Log out of the Buf Schema Registry`, + Long: fmt.Sprintf(`This command removes any BSR credentials from your %s file`, netrc.Filename), Args: cobra.MaximumNArgs(1), Run: builder.NewRunFunc( func(ctx context.Context, container appflag.Container) error { @@ -79,9 +79,9 @@ func run( if err != nil { return err } - loggedOutMessage := fmt.Sprintf("All existing BSR credentials removed from %s.\n", netrcFilePath) + loggedOutMessage := fmt.Sprintf("All existing BSR credentials removed from %s\n", netrcFilePath) if !modified1 && !modified2 { - loggedOutMessage = fmt.Sprintf("No BSR credentials found in %s; you are already logged out.\n", netrcFilePath) + loggedOutMessage = fmt.Sprintf("No BSR credentials found in %s; you are already logged out\n", netrcFilePath) } if _, err := container.Stdout().Write([]byte(loggedOutMessage)); err != nil { return err diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/bufapimodule.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/bufapimodule.go index e97ad5d629..0d6860559d 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/bufapimodule.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/bufapimodule.go @@ -40,12 +40,24 @@ func NewRepositoryCommitServiceClientFactory(clientConfig *connectclient.Config) // NewModuleReader returns a new ModuleReader backed by the download service. func NewModuleReader( downloadClientFactory DownloadServiceClientFactory, + opts ...ModuleReaderOption, ) bufmodule.ModuleReader { return newModuleReader( downloadClientFactory, + opts..., ) } +// ModuleReaderOption allows configuration of a module reader. +type ModuleReaderOption func(reader *moduleReader) + +// WithTamperProofing enables tamper proofing support (use of manifest/blobs). +func WithTamperProofing() ModuleReaderOption { + return func(reader *moduleReader) { + reader.tamperProofingEnabled = true + } +} + // NewModuleResolver returns a new ModuleResolver backed by the resolve service. 
func NewModuleResolver( logger *zap.Logger, diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_reader.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_reader.go index c250bd8119..bc0cf6fc1a 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_reader.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_reader.go @@ -16,7 +16,9 @@ package bufapimodule import ( "context" + "errors" + "github.com/bufbuild/buf/private/bufpkg/bufmanifest" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref" registryv1alpha1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1" @@ -26,17 +28,70 @@ import ( type moduleReader struct { downloadClientFactory DownloadServiceClientFactory + tamperProofingEnabled bool } func newModuleReader( downloadClientFactory DownloadServiceClientFactory, + opts ...ModuleReaderOption, ) *moduleReader { - return &moduleReader{ + reader := &moduleReader{ downloadClientFactory: downloadClientFactory, } + for _, opt := range opts { + opt(reader) + } + return reader } func (m *moduleReader) GetModule(ctx context.Context, modulePin bufmoduleref.ModulePin) (bufmodule.Module, error) { + moduleIdentity, err := bufmoduleref.NewModuleIdentity( + modulePin.Remote(), + modulePin.Owner(), + modulePin.Repository(), + ) + if err != nil { + // malformed pin + return nil, err + } + identityAndCommitOpt := bufmodule.ModuleWithModuleIdentityAndCommit( + moduleIdentity, + modulePin.Commit(), + ) + if m.tamperProofingEnabled { + resp, err := m.downloadManifestAndBlobs(ctx, modulePin) + if err != nil { + return nil, err + } + if resp.Manifest == nil { + return nil, errors.New("expected non-nil manifest with tamper proofing enabled") + } + // use manifest and blobs + bucket, err := bufmanifest.NewBucketFromManifestBlobs( + ctx, + resp.Manifest, + resp.Blobs, + ) + if err != nil { + return nil, err + } + return bufmodule.NewModuleForBucket(ctx, bucket, identityAndCommitOpt) + } + resp, err := m.download(ctx, modulePin) + if err != nil { + return nil, err + } + if resp.Module == nil { + // funny, success without a module to build + return nil, errors.New("no module in response") + } + return bufmodule.NewModuleForProto(ctx, resp.Module, identityAndCommitOpt) +} + +func (m *moduleReader) download( + ctx context.Context, + modulePin bufmoduleref.ModulePin, +) (*registryv1alpha1.DownloadResponse, error) { downloadService := m.downloadClientFactory(modulePin.Remote()) resp, err := downloadService.Download( ctx, @@ -53,16 +108,28 @@ func (m *moduleReader) GetModule(ctx context.Context, modulePin bufmoduleref.Mod } return nil, err } - moduleIdentity, err := bufmoduleref.NewModuleIdentity( - modulePin.Remote(), - modulePin.Owner(), - modulePin.Repository(), + return resp.Msg, err +} + +func (m *moduleReader) downloadManifestAndBlobs( + ctx context.Context, + modulePin bufmoduleref.ModulePin, +) (*registryv1alpha1.DownloadManifestAndBlobsResponse, error) { + downloadService := m.downloadClientFactory(modulePin.Remote()) + resp, err := downloadService.DownloadManifestAndBlobs( + ctx, + connect.NewRequest(®istryv1alpha1.DownloadManifestAndBlobsRequest{ + Owner: modulePin.Owner(), + Repository: modulePin.Repository(), + Reference: modulePin.Commit(), + }), ) if err != nil { + if connect.CodeOf(err) == connect.CodeNotFound { + // Required by ModuleReader interface spec + return nil, storage.NewErrNotExist(modulePin.String()) + } return nil, 
err } - return bufmodule.NewModuleForProto( - ctx, resp.Msg.Module, - bufmodule.ModuleWithModuleIdentityAndCommit(moduleIdentity, modulePin.Commit()), - ) + return resp.Msg, err } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_resolver.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_resolver.go index 1d61a892c9..749780a417 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_resolver.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufapimodule/module_resolver.go @@ -64,8 +64,9 @@ func (m *moduleResolver) GetModulePin(ctx context.Context, moduleReference bufmo moduleReference.Remote(), moduleReference.Owner(), moduleReference.Repository(), - "", + "", // branch resp.Msg.RepositoryCommit.Name, + resp.Msg.RepositoryCommit.Digest, resp.Msg.RepositoryCommit.CreateTime.AsTime(), ) } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/bufbreaking/internal/bufbreakingcheck/bufbreakingcheck.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/bufbreaking/internal/bufbreakingcheck/bufbreakingcheck.go index 520500fa65..1bb5ec01e1 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/bufbreaking/internal/bufbreakingcheck/bufbreakingcheck.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/bufbreaking/internal/bufbreakingcheck/bufbreakingcheck.go @@ -25,7 +25,6 @@ import ( "github.com/bufbuild/buf/private/pkg/protosource" "github.com/bufbuild/buf/private/pkg/stringutil" - "github.com/bufbuild/buf/private/pkg/tagrangeutil" ) // CheckEnumNoDelete is a check function. @@ -963,7 +962,7 @@ var CheckReservedEnumNoDelete = newEnumPairCheckFunc(checkReservedEnumNoDelete) func checkReservedEnumNoDelete(add addFunc, corpus *corpus, previousEnum protosource.Enum, enum protosource.Enum) error { previousRanges := previousEnum.ReservedTagRanges() ranges := enum.ReservedTagRanges() - if isSubset, missing := tagrangeutil.CheckIsSubset(ranges, previousRanges); !isSubset { + if isSubset, missing := protosource.CheckTagRangeIsSubset(ranges, previousRanges); !isSubset { for _, tagRange := range missing { add(enum, nil, enum.Location(), `Previously present reserved range %q on enum %q was deleted.`, protosource.TagRangeString(tagRange), enum.Name()) } @@ -984,7 +983,7 @@ var CheckReservedMessageNoDelete = newMessagePairCheckFunc(checkReservedMessageN func checkReservedMessageNoDelete(add addFunc, corpus *corpus, previousMessage protosource.Message, message protosource.Message) error { previousRanges := previousMessage.ReservedTagRanges() ranges := message.ReservedTagRanges() - if isSubset, missing := tagrangeutil.CheckIsSubset(ranges, previousRanges); !isSubset { + if isSubset, missing := protosource.CheckTagRangeIsSubset(ranges, previousRanges); !isSubset { for _, tagRange := range missing { add(message, nil, message.Location(), `Previously present reserved range %q on message %q was deleted.`, protosource.TagRangeString(tagRange), message.Name()) } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/internal/runner.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/internal/runner.go index bdcf3741ed..34c238c917 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/internal/runner.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufcheck/internal/runner.go @@ -23,21 +23,30 @@ import ( "github.com/bufbuild/buf/private/pkg/protosource" "github.com/bufbuild/buf/private/pkg/protoversion" "github.com/bufbuild/buf/private/pkg/stringutil" - 
"go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" ) +const ( + tracerName = "bufbuild/buf" +) + // Runner is a runner. type Runner struct { logger *zap.Logger ignorePrefix string + tracer trace.Tracer } // NewRunner returns a new Runner. func NewRunner(logger *zap.Logger, options ...RunnerOption) *Runner { runner := &Runner{ logger: logger, + tracer: otel.GetTracerProvider().Tracer(tracerName), } for _, option := range options { option(runner) @@ -66,11 +75,10 @@ func (r *Runner) Check(ctx context.Context, config *Config, previousFiles []prot if len(rules) == 0 { return nil, nil } - ctx, span := trace.StartSpan(ctx, "check") - span.AddAttributes( - trace.Int64Attribute("num_files", int64(len(files))), - trace.Int64Attribute("num_rules", int64(len(rules))), - ) + ctx, span := r.tracer.Start(ctx, "check", trace.WithAttributes( + attribute.Key("num_files").Int(len(files)), + attribute.Key("num_rules").Int(len(rules)), + )) defer span.End() ignoreFunc := r.newIgnoreFunc(config) @@ -94,6 +102,8 @@ func (r *Runner) Check(ctx context.Context, config *Config, previousFiles []prot } } if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return nil, err } bufanalysis.SortFileAnnotations(fileAnnotations) diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconfig/get.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconfig/get.go index 2943d8d07f..38895251d0 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconfig/get.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconfig/get.go @@ -22,13 +22,20 @@ import ( "github.com/bufbuild/buf/private/pkg/encoding" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/stringutil" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/multierr" ) func getConfigForBucket(ctx context.Context, readBucket storage.ReadBucket) (_ *Config, retErr error) { - ctx, span := trace.StartSpan(ctx, "get_config") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_config") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() // This will be in the order of precedence. 
var foundConfigFilePaths []string @@ -73,15 +80,20 @@ func getConfigForBucket(ctx context.Context, readBucket storage.ReadBucket) (_ * } func getConfigForData(ctx context.Context, data []byte) (*Config, error) { - _, span := trace.StartSpan(ctx, "get_config_for_data") + _, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_config_for_data") defer span.End() - return getConfigForDataInternal( + config, err := getConfigForDataInternal( ctx, encoding.UnmarshalJSONOrYAMLNonStrict, encoding.UnmarshalJSONOrYAMLStrict, data, "Configuration data", ) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + return config, err } func getConfigForDataInternal( diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/errors.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/errors.go index ad3d1ccfe7..3ecbe37659 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/errors.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/errors.go @@ -16,20 +16,20 @@ package bufconnect import "errors" -// ErrAuth wraps the error returned in the auth provider to add additional context. -type ErrAuth struct { +// AuthError wraps the error returned in the auth provider to add additional context. +type AuthError struct { cause error tokenEnvKey string } // Unwrap returns the underlying error. -func (e *ErrAuth) Unwrap() error { +func (e *AuthError) Unwrap() error { return e.cause } // Error implements the error interface and returns the error message. -func (e *ErrAuth) Error() string { +func (e *AuthError) Error() string { if e.cause == nil { return "unknown error" } @@ -37,13 +37,13 @@ func (e *ErrAuth) Error() string { } // TokenEnvKey returns the environment variable used, if any, for authentication. -func (e *ErrAuth) TokenEnvKey() string { +func (e *AuthError) TokenEnvKey() string { return e.tokenEnvKey } -// AsAuthError uses errors.As to unwrap any error and look for an *ErrAuth. -func AsAuthError(err error) (*ErrAuth, bool) { - var authErr *ErrAuth +// AsAuthError uses errors.As to unwrap any error and look for an *AuthError. +func AsAuthError(err error) (*AuthError, bool) { + var authErr *AuthError ok := errors.As(err, &authErr) return authErr, ok } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/interceptors.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/interceptors.go index 11c8f08378..f76237cf55 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/interceptors.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/interceptors.go @@ -16,10 +16,7 @@ package bufconnect import ( "context" - "fmt" - "github.com/bufbuild/buf/private/pkg/app" - "github.com/bufbuild/buf/private/pkg/netrc" "github.com/bufbuild/connect-go" ) @@ -42,38 +39,37 @@ func NewSetCLIVersionInterceptor(version string) connect.UnaryInterceptorFunc { return interceptor } +// TokenProvider finds the token for NewAuthorizationInterceptorProvider. +type TokenProvider interface { + // RemoteToken returns the remote token from the remote address. + RemoteToken(address string) string + // IsFromEnvVar returns true if the TokenProvider is generated from an environment variable. + IsFromEnvVar() bool +} + // NewAuthorizationInterceptorProvider returns a new provider function which, when invoked, returns an interceptor -// which will look up an auth token by address and set it into the request header. 
This is used for registry providers -// where the token is looked up by the client address at the time of client construction (i.e. for clients where a -// user is already authenticated and the token is stored in .netrc) +// which will set the auth token into the request header by the provided option. // // Note that the interceptor returned from this provider is always applied LAST in the series of interceptors added to // a client. -func NewAuthorizationInterceptorProvider(container app.EnvContainer) func(string) connect.UnaryInterceptorFunc { +func NewAuthorizationInterceptorProvider(tokenProviders ...TokenProvider) func(string) connect.UnaryInterceptorFunc { return func(address string) connect.UnaryInterceptorFunc { interceptor := func(next connect.UnaryFunc) connect.UnaryFunc { return connect.UnaryFunc(func( ctx context.Context, req connect.AnyRequest, ) (connect.AnyResponse, error) { - envKey := tokenEnvKey - token := container.Env(envKey) - if token == "" { - envKey = "" - machine, err := netrc.GetMachineForName(container, address) - if err != nil { - return nil, fmt.Errorf("failed to read server password from netrc: %w", err) - } - if machine != nil { - token = machine.Password() + usingTokenEnvKey := false + for _, tf := range tokenProviders { + if token := tf.RemoteToken(address); token != "" { + req.Header().Set(AuthenticationHeader, AuthenticationTokenPrefix+token) + usingTokenEnvKey = tf.IsFromEnvVar() + break } } - if token != "" { - req.Header().Set(AuthenticationHeader, AuthenticationTokenPrefix+token) - } response, err := next(ctx, req) - if err != nil { - err = &ErrAuth{cause: err, tokenEnvKey: envKey} + if err != nil && usingTokenEnvKey { + err = &AuthError{cause: err, tokenEnvKey: tokenEnvKey} } return response, err }) @@ -81,26 +77,3 @@ func NewAuthorizationInterceptorProvider(container app.EnvContainer) func(string return interceptor } } - -// NewAuthorizationInterceptorProviderWithToken returns a new provider function which, when invoked, returns an -// interceptor which sets the provided auth token into the request header. This is used for registry providers where -// the token is known at provider creation (i.e. when logging in and explicitly pasting a token into stdin -// -// Note that the interceptor returned from this provider is always applied LAST in the series of interceptors added to -// a client. -func NewAuthorizationInterceptorProviderWithToken(token string) func(string) connect.UnaryInterceptorFunc { - return func(address string) connect.UnaryInterceptorFunc { - interceptor := func(next connect.UnaryFunc) connect.UnaryFunc { - return connect.UnaryFunc(func( - ctx context.Context, - req connect.AnyRequest, - ) (connect.AnyResponse, error) { - if token != "" { - req.Header().Set(AuthenticationHeader, AuthenticationTokenPrefix+token) - } - return next(ctx, req) - }) - } - return interceptor - } -} diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/netrc_token_provider.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/netrc_token_provider.go new file mode 100644 index 0000000000..438882567c --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/netrc_token_provider.go @@ -0,0 +1,46 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bufconnect + +import ( + "github.com/bufbuild/buf/private/pkg/app" + "github.com/bufbuild/buf/private/pkg/netrc" +) + +// netrcTokenProvider is used to provide remote tokenToAuthKey from .netrc. +type netrcTokenProvider struct { + container app.EnvContainer + getMachineForName func(app.EnvContainer, string) (netrc.Machine, error) +} + +// NewNetrcTokenProvider returns a TokenProvider for a .netrc in a container. +func NewNetrcTokenProvider(container app.EnvContainer, getMachineForName func(app.EnvContainer, string) (netrc.Machine, error)) TokenProvider { + return &netrcTokenProvider{container: container, getMachineForName: getMachineForName} +} + +func (nt *netrcTokenProvider) RemoteToken(address string) string { + machine, err := nt.getMachineForName(nt.container, address) + if err != nil { + return "" + } + if machine != nil { + return machine.Password() + } + return "" +} + +func (nt *netrcTokenProvider) IsFromEnvVar() bool { + return false +} diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/static_token_provider.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/static_token_provider.go new file mode 100644 index 0000000000..eb27380603 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufconnect/static_token_provider.go @@ -0,0 +1,138 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bufconnect + +import ( + "errors" + "fmt" + "strings" + + "github.com/bufbuild/buf/private/pkg/app" +) + +// NewTokenProviderFromContainer creates a singleTokenProvider from the BUF_TOKEN environment variable +func NewTokenProviderFromContainer(container app.EnvContainer) (TokenProvider, error) { + return newTokenProviderFromString(container.Env(tokenEnvKey), true) +} + +// NewTokenProviderFromString creates a singleTokenProvider by the token provided +func NewTokenProviderFromString(token string) (TokenProvider, error) { + return newTokenProviderFromString(token, false) +} + +// newTokenProviderFromString returns a TokenProvider with auth keys from the provided token. The +// remote token is in the format: username1:token1@remote1,username2:token2@remote2,defaultToken. +// The special characters `:`, `@` and `,` are used as the splitters. The usernames, tokens, and +// remote addresses does not contain these characters since they are enforced by the rules in BSR. +func newTokenProviderFromString(token string, isFromEnvVar bool) (TokenProvider, error) { + if token == "" { + return nopTokenProvider{}, nil + } + // Tokens for different remotes are separated by `,`. 
Using strings.Split to separate the string into remote tokens. + tokens := strings.Split(token, ",") + if len(tokens) == 1 { + if strings.Contains(tokens[0], "@") { + return newMultipleTokenProvider(tokens, isFromEnvVar) + } + return newSingleTokenProvider(tokens[0], isFromEnvVar) + } + return newMultipleTokenProvider(tokens, isFromEnvVar) +} + +// singleTokenProvider is used to provide set of authentication tokenToAuthKey. +type singleTokenProvider struct { + // true: the tokenSet is generated from environment variable tokenEnvKey + // false: otherwise + setBufTokenEnvVar bool + token string +} + +func newSingleTokenProvider(token string, isFromEnvVar bool) (*singleTokenProvider, error) { + if strings.Contains(token, "@") { + return nil, errors.New("token cannot contain special character `@`") + } + if strings.Contains(token, ",") { + return nil, errors.New("token cannot contain special character `,`") + } + if strings.Contains(token, ":") { + return nil, errors.New("token cannot contain special character `:`") + } + if token == "" { + return nil, errors.New("single token cannot be empty") + } + return &singleTokenProvider{ + setBufTokenEnvVar: isFromEnvVar, + token: token, + }, nil +} + +// RemoteToken finds the token by the remote address +func (t *singleTokenProvider) RemoteToken(address string) string { + return t.token +} + +func (t *singleTokenProvider) IsFromEnvVar() bool { + return t.setBufTokenEnvVar +} + +type multipleTokenProvider struct { + addressToToken map[string]string + isFromEnvVar bool +} + +func newMultipleTokenProvider(tokens []string, isFromEnvVar bool) (*multipleTokenProvider, error) { + addressToToken := make(map[string]string) + for _, token := range tokens { + split := strings.Split(token, "@") + if len(split) != 2 { + return nil, fmt.Errorf("invalid token: %s", token) + } + if split[0] == "" || split[1] == "" { + return nil, fmt.Errorf("invalid token: %s", token) + } + if strings.Contains(split[0], ":") { + return nil, fmt.Errorf("invalid token: %s, token cannot contain special character `:`", token) + } + if strings.Contains(split[0], ",") { + return nil, fmt.Errorf("invalid token: %s, token cannot contain special character `,`", token) + } + if _, ok := addressToToken[split[1]]; ok { + return nil, fmt.Errorf("invalid token: %s, repeated remote adddress: %s", token, split[1]) + } + addressToToken[split[1]] = split[0] + } + return &multipleTokenProvider{ + addressToToken: addressToToken, + isFromEnvVar: isFromEnvVar, + }, nil +} + +func (m *multipleTokenProvider) RemoteToken(address string) string { + return m.addressToToken[address] +} + +func (m *multipleTokenProvider) IsFromEnvVar() bool { + return m.isFromEnvVar +} + +type nopTokenProvider struct{} + +func (nopTokenProvider) RemoteToken(string) string { + return "" +} + +func (nopTokenProvider) IsFromEnvVar() bool { + return false +} diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagebuild/builder.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagebuild/builder.go index 78d2d39ddc..35583ada57 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagebuild/builder.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagebuild/builder.go @@ -29,18 +29,27 @@ import ( "github.com/bufbuild/protocompile/parser" "github.com/bufbuild/protocompile/protoutil" "github.com/bufbuild/protocompile/reporter" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" 
"google.golang.org/protobuf/reflect/protoreflect" ) +const ( + loggerName = "bufimagebuild" + tracerName = "bufbuild/buf" +) + type builder struct { logger *zap.Logger + tracer trace.Tracer } func newBuilder(logger *zap.Logger) *builder { return &builder{ - logger: logger.Named("bufimagebuild"), + logger: logger.Named(loggerName), + tracer: otel.GetTracerProvider().Tracer(tracerName), } } @@ -64,9 +73,15 @@ func (b *builder) build( ctx context.Context, moduleFileSet bufmodule.ModuleFileSet, excludeSourceCodeInfo bool, -) (bufimage.Image, []bufanalysis.FileAnnotation, error) { - ctx, span := trace.StartSpan(ctx, "build") +) (_ bufimage.Image, _ []bufanalysis.FileAnnotation, retErr error) { + ctx, span := b.tracer.Start(ctx, "build") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() parserAccessorHandler := bufmoduleprotocompile.NewParserAccessorHandler(ctx, moduleFileSet) targetFileInfos, err := moduleFileSet.TargetFileInfos(ctx) @@ -105,6 +120,7 @@ func (b *builder) build( parserAccessorHandler, buildResult.SyntaxUnspecifiedFilenames, buildResult.FilenameToUnusedDependencyFilenames, + b.tracer, ) if err != nil { return nil, nil, err @@ -272,8 +288,9 @@ func getImage( parserAccessorHandler bufmoduleprotocompile.ParserAccessorHandler, syntaxUnspecifiedFilenames map[string]struct{}, filenameToUnusedDependencyFilenames map[string]map[string]struct{}, + tracer trace.Tracer, ) (bufimage.Image, error) { - ctx, span := trace.StartSpan(ctx, "get_image") + ctx, span := tracer.Start(ctx, "get_image") defer span.End() // if we aren't including imports, then we need a set of file names that @@ -305,10 +322,17 @@ func getImage( imageFiles, ) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return nil, err } } - return bufimage.NewImage(imageFiles) + image, err := bufimage.NewImage(imageFiles) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + return image, err } func getImageFilesRec( diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagemodify/php_metadata_namespace.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagemodify/php_metadata_namespace.go index cbc6b7635e..d5195fced8 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagemodify/php_metadata_namespace.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimagemodify/php_metadata_namespace.go @@ -29,7 +29,7 @@ const PhpMetadataNamespaceID = "PHP_METADATA_NAMESPACE" var ( // phpMetadataNamespacePath is the SourceCodeInfo path for the php_metadata_namespace option. 
// Ref: https://github.com/protocolbuffers/protobuf/blob/61689226c0e3ec88287eaed66164614d9c4f2bf7/src/google/protobuf/descriptor.proto#L448 - phpMetadataNamespacePath = []int32{8, 41} + phpMetadataNamespacePath = []int32{8, 44} ) func phpMetadataNamespace( diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/bufimageutil.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/bufimageutil.go index e6561db118..ef4ffb5e09 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/bufimageutil.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/bufimageutil.go @@ -27,6 +27,10 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) +const ( + anyFullName = "google.protobuf.Any" +) + var ( // ErrImageFilterTypeNotFound is returned from ImageFilteredByTypes when // a specified type cannot be found in an image. @@ -192,25 +196,54 @@ func ImageFilteredByTypesWithOptions(image bufimage.Image, types []string, opts } // Check types exist startingDescriptors := make([]namedDescriptor, 0, len(types)) + var startingPackages []*protoPackage for _, typeName := range types { + // TODO: consider supporting a glob syntax of some kind, to do more advanced pattern + // matching, such as ability to get a package AND all of its sub-packages. startingDescriptor, ok := imageIndex.ByName[typeName] + if ok { + // It's a type name + typeInfo := imageIndex.ByDescriptor[startingDescriptor] + if image.GetFile(typeInfo.file).IsImport() && !options.allowImportedTypes { + return nil, fmt.Errorf("filtering by type %q: %w", typeName, ErrImageFilterTypeIsImport) + } + startingDescriptors = append(startingDescriptors, startingDescriptor) + continue + } + // It could be a package name + pkg, ok := imageIndex.Packages[typeName] if !ok { + // but it's not... return nil, fmt.Errorf("filtering by type %q: %w", typeName, ErrImageFilterTypeNotFound) } - typeInfo := imageIndex.ByDescriptor[startingDescriptor] - if image.GetFile(typeInfo.file).IsImport() && !options.allowImportedTypes { - return nil, fmt.Errorf("filtering by type %q: %w", typeName, ErrImageFilterTypeIsImport) + if !options.allowImportedTypes { + // if package includes only imported files, then reject + onlyImported := true + for _, file := range pkg.files { + if !file.IsImport() { + onlyImported = false + break + } + } + if onlyImported { + return nil, fmt.Errorf("filtering by type %q: %w", typeName, ErrImageFilterTypeIsImport) + } } - startingDescriptors = append(startingDescriptors, startingDescriptor) + startingPackages = append(startingPackages, pkg) } // Find all types to include in filtered image. 
closure := newTransitiveClosure() + for _, startingPackage := range startingPackages { + if err := closure.addPackage(startingPackage, imageIndex, options); err != nil { + return nil, err + } + } for _, startingDescriptor := range startingDescriptors { if err := closure.addElement(startingDescriptor, "", false, imageIndex, options); err != nil { return nil, err } } - // After all typs are added, add their known extensions + // After all types are added, add their known extensions if err := closure.addExtensions(imageIndex, options); err != nil { return nil, err } @@ -259,12 +292,15 @@ func ImageFilteredByTypesWithOptions(image bufimage.Image, types []string, opts } imageFileDescriptor.WeakDependency = imageFileDescriptor.WeakDependency[:i] - imageFileDescriptor.MessageType = trimMessageDescriptors(imageFileDescriptor.MessageType, closure.elements) - imageFileDescriptor.EnumType = trimSlice(imageFileDescriptor.EnumType, closure.elements) - imageFileDescriptor.Extension = trimSlice(imageFileDescriptor.Extension, closure.elements) - imageFileDescriptor.Service = trimSlice(imageFileDescriptor.Service, closure.elements) - for _, serviceDescriptor := range imageFileDescriptor.Service { - serviceDescriptor.Method = trimSlice(serviceDescriptor.Method, closure.elements) + if _, ok := closure.completeFiles[imageFile.Path()]; !ok { + // if not keeping entire file, filter contents now + imageFileDescriptor.MessageType = trimMessageDescriptors(imageFileDescriptor.MessageType, closure.elements) + imageFileDescriptor.EnumType = trimSlice(imageFileDescriptor.EnumType, closure.elements) + imageFileDescriptor.Extension = trimSlice(imageFileDescriptor.Extension, closure.elements) + imageFileDescriptor.Service = trimSlice(imageFileDescriptor.Service, closure.elements) + for _, serviceDescriptor := range imageFileDescriptor.Service { + serviceDescriptor.Method = trimSlice(serviceDescriptor.Method, closure.elements) + } } // TODO: With some from/to mappings, perhaps even sourcecodeinfo @@ -316,9 +352,17 @@ func trimSlice[T namedDescriptor](in []T, toKeep map[namedDescriptor]closureIncl // subset of an image. When an element is added to the closure, all of its // dependencies are recursively added. type transitiveClosure struct { + // The elements included in the transitive closure. elements map[namedDescriptor]closureInclusionMode - files map[string]struct{} - imports map[string]map[string]struct{} + // The set of files that contain all items in elements. + files map[string]struct{} + // Any files that are part of the closure in their entirety (due to an + // entire package being included). The above fields are used to filter the + // contents of files. But files named in this set will not be filtered. + completeFiles map[string]struct{} + // The set of imports for each file. This allows for re-writing imports + // for files whose contents have been pruned. 
+ imports map[string]map[string]struct{} } type closureInclusionMode int @@ -340,9 +384,10 @@ const ( func newTransitiveClosure() *transitiveClosure { return &transitiveClosure{ - elements: map[namedDescriptor]closureInclusionMode{}, - files: map[string]struct{}{}, - imports: map[string]map[string]struct{}{}, + elements: map[namedDescriptor]closureInclusionMode{}, + files: map[string]struct{}{}, + completeFiles: map[string]struct{}{}, + imports: map[string]map[string]struct{}{}, } } @@ -366,6 +411,25 @@ func (t *transitiveClosure) addFile(file string, imageIndex *imageIndex, opts *i return t.exploreCustomOptions(imageIndex.Files[file], file, imageIndex, opts) } +func (t *transitiveClosure) addPackage( + pkg *protoPackage, + imageIndex *imageIndex, + opts *imageFilterOptions, +) error { + for _, file := range pkg.files { + if err := t.addFile(file.Path(), imageIndex, opts); err != nil { + return err + } + t.completeFiles[file.Path()] = struct{}{} + } + for _, descriptor := range pkg.elements { + if err := t.addElement(descriptor, "", false, imageIndex, opts); err != nil { + return err + } + } + return nil +} + func (t *transitiveClosure) addElement( descriptor namedDescriptor, referrerFile string, @@ -639,6 +703,13 @@ func (t *transitiveClosure) exploreCustomOptions( optionsName := string(options.Descriptor().FullName()) var err error options.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + // If the value contains an Any message, we should add the message type + // therein to the closure. + if err = t.exploreOptionValueForAny(fd, val, referrerFile, imageIndex, opts); err != nil { + return false + } + + // Also include custom option definitions (e.g. extensions) if !fd.IsExtension() { return true } @@ -654,6 +725,76 @@ func (t *transitiveClosure) exploreCustomOptions( return err } +func isMessageKind(k protoreflect.Kind) bool { + return k == protoreflect.MessageKind || k == protoreflect.GroupKind +} + +func (t *transitiveClosure) exploreOptionValueForAny( + fd protoreflect.FieldDescriptor, + val protoreflect.Value, + referrerFile string, + imageIndex *imageIndex, + opts *imageFilterOptions, +) error { + switch { + case fd.IsMap() && isMessageKind(fd.MapValue().Kind()): + var err error + val.Map().Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + if err = t.exploreOptionScalarValueForAny(v.Message(), referrerFile, imageIndex, opts); err != nil { + return false + } + return true + }) + return err + case isMessageKind(fd.Kind()): + if fd.IsList() { + listVal := val.List() + for i := 0; i < listVal.Len(); i++ { + if err := t.exploreOptionScalarValueForAny(listVal.Get(i).Message(), referrerFile, imageIndex, opts); err != nil { + return err + } + } + } else { + return t.exploreOptionScalarValueForAny(val.Message(), referrerFile, imageIndex, opts) + } + } + return nil +} + +func (t *transitiveClosure) exploreOptionScalarValueForAny( + msg protoreflect.Message, + referrerFile string, + imageIndex *imageIndex, + opts *imageFilterOptions, +) error { + md := msg.Descriptor() + if md.FullName() == anyFullName { + // Found one! + typeURLFd := md.Fields().ByNumber(1) + if typeURLFd.Kind() != protoreflect.StringKind { + // should not be possible... 
+ return nil + } + typeURL := msg.Get(typeURLFd).String() + pos := strings.LastIndexByte(typeURL, '/') + msgType := typeURL[pos+1:] + d, _ := imageIndex.ByName[msgType].(*descriptorpb.DescriptorProto) + if d != nil { + if err := t.addElement(d, referrerFile, false, imageIndex, opts); err != nil { + return err + } + } + return nil + } + // keep digging + var err error + msg.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + err = t.exploreOptionValueForAny(fd, val, referrerFile, imageIndex, opts) + return err == nil + }) + return err +} + func freeMessageRangeStringsRec( s []string, message protosource.Message, diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/image_index.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/image_index.go index e2d7ec1550..984b20d4ee 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/image_index.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufimage/bufimageutil/image_index.go @@ -46,6 +46,9 @@ type imageIndex struct { // NameToOptions maps `google.protobuf.*Options` type names to their // known extensions by field tag. NameToOptions map[string]map[int32]*descriptorpb.FieldDescriptorProto + + // Packages maps package names to package contents. + Packages map[string]*protoPackage } type namedDescriptor interface { @@ -67,12 +70,19 @@ type elementInfo struct { parent namedDescriptor } +type protoPackage struct { + files []bufimage.ImageFile + elements []namedDescriptor + subPackages []*protoPackage +} + // newImageIndexForImage builds an imageIndex for a given image. func newImageIndexForImage(image bufimage.Image, opts *imageFilterOptions) (*imageIndex, error) { index := &imageIndex{ ByName: make(map[string]namedDescriptor), ByDescriptor: make(map[namedDescriptor]elementInfo), Files: make(map[string]*descriptorpb.FileDescriptorProto), + Packages: make(map[string]*protoPackage), } if opts.includeCustomOptions { index.NameToOptions = make(map[string]map[int32]*descriptorpb.FieldDescriptorProto) @@ -82,6 +92,8 @@ func newImageIndexForImage(image bufimage.Image, opts *imageFilterOptions) (*ima } for _, file := range image.Files() { + pkg := addPackageToIndex(file.FileDescriptor().GetPackage(), index) + pkg.files = append(pkg.files, file) fileName := file.Path() fileDescriptorProto := file.Proto() index.Files[fileName] = fileDescriptorProto @@ -102,11 +114,27 @@ func newImageIndexForImage(image bufimage.Image, opts *imageFilterOptions) (*ima } } - index.ByName[string(name)] = descriptor - index.ByDescriptor[descriptor] = elementInfo{ - fullName: string(name), - parent: parent, - file: fileName, + // certain descriptor types don't need to be indexed: + // enum values, normal (non-extension) fields, and oneofs + var includeInIndex bool + switch d := descriptor.(type) { + case *descriptorpb.EnumValueDescriptorProto, *descriptorpb.OneofDescriptorProto: + // do not add to package elements; these elements are implicitly included by their enclosing type + case *descriptorpb.FieldDescriptorProto: + // only add to elements if an extension (regular fields implicitly included by containing message) + includeInIndex = d.Extendee != nil + default: + includeInIndex = true + } + + if includeInIndex { + index.ByName[string(name)] = descriptor + index.ByDescriptor[descriptor] = elementInfo{ + fullName: string(name), + parent: parent, + file: fileName, + } + pkg.elements = append(pkg.elements, descriptor) } ext, ok := descriptor.(*descriptorpb.FieldDescriptorProto) 
@@ -135,6 +163,25 @@ func newImageIndexForImage(image bufimage.Image, opts *imageFilterOptions) (*ima return index, nil } +func addPackageToIndex(pkgName string, index *imageIndex) *protoPackage { + pkg := index.Packages[pkgName] + if pkg != nil { + return pkg + } + pkg = &protoPackage{} + index.Packages[pkgName] = pkg + if pkgName == "" { + return pkg + } + var parentPkgName string + if pos := strings.LastIndexByte(pkgName, '.'); pos != -1 { + parentPkgName = pkgName[:pos] + } + parentPkg := addPackageToIndex(parentPkgName, index) + parentPkg.subPackages = append(parentPkg.subPackages, pkg) + return pkg +} + func isOptionsTypeName(typeName string) bool { switch typeName { case "google.protobuf.FileOptions", diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/buflock/buflock.go b/vendor/github.com/bufbuild/buf/private/bufpkg/buflock/buflock.go index bcad058c2b..444718fe8d 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/buflock/buflock.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/buflock/buflock.go @@ -19,6 +19,7 @@ import ( "context" "time" + "github.com/bufbuild/buf/private/pkg/manifest" "github.com/bufbuild/buf/private/pkg/storage" ) @@ -44,6 +45,7 @@ type Dependency struct { Owner string Repository string Commit string + Digest string } // ReadConfig reads the lock file at ExternalConfigFilePath relative @@ -83,11 +85,17 @@ type ExternalConfigDependencyV1 struct { // DependencyForExternalConfigDependencyV1 returns the Dependency representation of a ExternalConfigDependencyV1. func DependencyForExternalConfigDependencyV1(dep ExternalConfigDependencyV1) Dependency { + // Don't consume old buf digests. + digest := dep.Digest + if !isValidDigest(digest) { + digest = "" + } return Dependency{ Remote: dep.Remote, Owner: dep.Owner, Repository: dep.Repository, Commit: dep.Commit, + Digest: digest, } } @@ -100,6 +108,7 @@ func ExternalConfigDependencyV1ForDependency(dep Dependency) ExternalConfigDepen Owner: dep.Owner, Repository: dep.Repository, Commit: dep.Commit, + Digest: dep.Digest, } } @@ -115,13 +124,14 @@ type ExternalConfigDependencyV1Beta1 struct { CreateTime time.Time `json:"create_time,omitempty" yaml:"create_time,omitempty"` } -// DepedencyForExternalConfigDependencyV1Beta1 returns the Dependency representation of a ExternalConfigDependencyV1Beta1. +// DependencyForExternalConfigDependencyV1Beta1 returns the Dependency representation of a ExternalConfigDependencyV1Beta1. func DependencyForExternalConfigDependencyV1Beta1(dep ExternalConfigDependencyV1Beta1) Dependency { return Dependency{ Remote: dep.Remote, Owner: dep.Owner, Repository: dep.Repository, Commit: dep.Commit, + Digest: "", // digests in v1Beta1 are not valid v1 digests } } @@ -142,3 +152,10 @@ func ExternalConfigDependencyV1Beta1ForDependency(dep Dependency) ExternalConfig type ExternalConfigVersion struct { Version string `json:"version,omitempty" yaml:"version,omitempty"` } + +// isValidDigest returns true when the digest string is successfully parsed +// by the `manifest` pkg. Older buf digests are not considered valid (b1/b3). +func isValidDigest(digest string) bool { + _, err := manifest.NewDigestFromString(digest) + return err == nil +} diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/bucket.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/bucket.go new file mode 100644 index 0000000000..3fa75dd545 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/bucket.go @@ -0,0 +1,68 @@ +// Copyright 2020-2023 Buf Technologies, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bufmanifest + +import ( + "bytes" + "context" + "fmt" + + modulev1alpha1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/module/v1alpha1" + "github.com/bufbuild/buf/private/pkg/manifest" + "github.com/bufbuild/buf/private/pkg/storage" +) + +// NewBucketFromManifestBlobs builds a storage bucket from a manifest blob and a +// set of other blobs, provided in protobuf form. It makes sure that the content +// of every blob (including the manifest) matches its digest, and additionally checks +// that the blob set matches the manifest paths exactly (no missing or +// extra blobs). This bucket is suitable for building or exporting. +func NewBucketFromManifestBlobs( + ctx context.Context, + manifestBlob *modulev1alpha1.Blob, + blobs []*modulev1alpha1.Blob, +) (storage.ReadBucket, error) { + if _, err := NewBlobFromProto(manifestBlob); err != nil { + return nil, fmt.Errorf("invalid manifest: %w", err) + } + parsedManifest, err := manifest.NewFromReader( + bytes.NewReader(manifestBlob.Content), + ) + if err != nil { + return nil, fmt.Errorf("parse manifest content: %w", err) + } + var memBlobs []manifest.Blob + for i, modBlob := range blobs { + memBlob, err := NewBlobFromProto(modBlob) + if err != nil { + return nil, fmt.Errorf("invalid blob at index %d: %w", i, err) + } + memBlobs = append(memBlobs, memBlob) + } + blobSet, err := manifest.NewBlobSet(ctx, memBlobs) + if err != nil { + return nil, fmt.Errorf("invalid blobs: %w", err) + } + manifestBucket, err := manifest.NewBucket( + *parsedManifest, + *blobSet, + manifest.BucketWithAllManifestBlobsValidation(), + manifest.BucketWithNoExtraBlobsValidation(), + ) + if err != nil { + return nil, fmt.Errorf("new manifest bucket: %w", err) + } + return manifestBucket, nil +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/observabilityzap.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/bufmanifest.go similarity index 66% rename from vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/observabilityzap.go rename to vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/bufmanifest.go index 5152d11545..8e9c296df8 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/observabilityzap.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/bufmanifest.go @@ -12,14 +12,4 @@ // See the License for the specific language governing permissions and // limitations under the License. -package observabilityzap - -import ( - "github.com/bufbuild/buf/private/pkg/observability" - "go.uber.org/zap" -) - -// NewTraceExportCloser returns a new TraceExportCloser with a zap backend.
-func NewTraceExportCloser(logger *zap.Logger) observability.TraceExportCloser { - return newTraceExportCloser(logger) -} +package bufmanifest diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/mapper.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/mapper.go new file mode 100644 index 0000000000..a8325231fd --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/mapper.go @@ -0,0 +1,115 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bufmanifest + +import ( + "context" + "fmt" + "io" + + modulev1alpha1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/module/v1alpha1" + "github.com/bufbuild/buf/private/pkg/manifest" + "go.uber.org/multierr" +) + +var ( + protoDigestTypeToDigestType = map[modulev1alpha1.DigestType]manifest.DigestType{ + modulev1alpha1.DigestType_DIGEST_TYPE_SHAKE256: manifest.DigestTypeShake256, + } + digestTypeToProtoDigestType = map[manifest.DigestType]modulev1alpha1.DigestType{ + manifest.DigestTypeShake256: modulev1alpha1.DigestType_DIGEST_TYPE_SHAKE256, + } +) + +// NewDigestFromProtoDigest maps a modulev1alpha1.Digest to a Digest. +func NewDigestFromProtoDigest(digest *modulev1alpha1.Digest) (*manifest.Digest, error) { + if digest == nil { + return nil, fmt.Errorf("nil digest") + } + dType, ok := protoDigestTypeToDigestType[digest.DigestType] + if !ok { + return nil, fmt.Errorf("unsupported digest kind: %s", digest.DigestType.String()) + } + return manifest.NewDigestFromBytes(dType, digest.Digest) +} + +// AsProtoBlob returns the passed blob as a proto module blob. +func AsProtoBlob(ctx context.Context, b manifest.Blob) (_ *modulev1alpha1.Blob, retErr error) { + digestType, ok := digestTypeToProtoDigestType[b.Digest().Type()] + if !ok { + return nil, fmt.Errorf("digest type %q not supported by module proto", b.Digest().Type()) + } + rc, err := b.Open(ctx) + if err != nil { + return nil, fmt.Errorf("cannot open blob: %w", err) + } + defer func() { + retErr = multierr.Append(retErr, rc.Close()) + }() + content, err := io.ReadAll(rc) + if err != nil { + return nil, fmt.Errorf("cannot read blob contents: %w", err) + } + return &modulev1alpha1.Blob{ + Digest: &modulev1alpha1.Digest{ + DigestType: digestType, + Digest: b.Digest().Bytes(), + }, + Content: content, + }, nil +} + +// NewBlobFromProto returns a Blob from a proto module blob. It makes sure the +// digest and content match. +func NewBlobFromProto(b *modulev1alpha1.Blob) (manifest.Blob, error) { + if b == nil { + return nil, fmt.Errorf("nil blob") + } + digest, err := NewDigestFromProtoDigest(b.Digest) + if err != nil { + return nil, fmt.Errorf("digest from proto digest: %w", err) + } + memBlob, err := manifest.NewMemoryBlob( + *digest, + b.Content, + manifest.MemoryBlobWithDigestValidation(), + ) + if err != nil { + return nil, fmt.Errorf("new memory blob: %w", err) + } + return memBlob, nil +} + +// ToProtoManifestAndBlobs converts a Manifest and BlobSet to the protobuf types.
+func ToProtoManifestAndBlobs(ctx context.Context, manifest *manifest.Manifest, blobs *manifest.BlobSet) (*modulev1alpha1.Blob, []*modulev1alpha1.Blob, error) { + manifestBlob, err := manifest.Blob() + if err != nil { + return nil, nil, err + } + manifestProtoBlob, err := AsProtoBlob(ctx, manifestBlob) + if err != nil { + return nil, nil, err + } + filesBlobs := blobs.Blobs() + filesProtoBlobs := make([]*modulev1alpha1.Blob, len(filesBlobs)) + for i, b := range filesBlobs { + pb, err := AsProtoBlob(ctx, b) + if err != nil { + return nil, nil, err + } + filesProtoBlobs[i] = pb + } + return manifestProtoBlob, filesProtoBlobs, nil +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/tagrangeutil/usage.gen.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/usage.gen.go similarity index 97% rename from vendor/github.com/bufbuild/buf/private/pkg/tagrangeutil/usage.gen.go rename to vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/usage.gen.go index 846ad44ed4..46b4e92eab 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/tagrangeutil/usage.gen.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmanifest/usage.gen.go @@ -14,6 +14,6 @@ // Generated. DO NOT EDIT. -package tagrangeutil +package bufmanifest import _ "github.com/bufbuild/buf/private/usage" diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/bufmodulebuild.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/bufmodulebuild.go index 68b2fc8920..e13de64178 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/bufmodulebuild.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/bufmodulebuild.go @@ -18,9 +18,7 @@ import ( "context" "github.com/bufbuild/buf/private/bufpkg/bufmodule" - "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleconfig" "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref" - "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storageos" "go.uber.org/zap" ) @@ -53,22 +51,13 @@ func WithWorkspace(workspace bufmodule.Workspace) BuildModuleFileSetOption { } // ModuleBucketBuilder builds modules for buckets. -type ModuleBucketBuilder interface { - // BuildForBucket builds a module for the given bucket. - // - // If paths is empty, all files are built. - // Paths should be relative to the bucket, not the roots. - BuildForBucket( - ctx context.Context, - readBucket storage.ReadBucket, - config *bufmoduleconfig.Config, - options ...BuildOption, - ) (bufmodule.Module, error) -} +type ModuleBucketBuilder = *moduleBucketBuilder // NewModuleBucketBuilder returns a new BucketBuilder. -func NewModuleBucketBuilder(logger *zap.Logger) ModuleBucketBuilder { - return newModuleBucketBuilder(logger) +func NewModuleBucketBuilder( + options ...BuildOption, +) ModuleBucketBuilder { + return newModuleBucketBuilder(options...) } // ModuleIncludeBuilder builds modules for includes. 
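To make the mapper's round-trip contract concrete, here is a minimal sketch that converts an in-memory blob to its proto form and back; it assumes manifest.NewDigester as the digest constructor, since the hunks above only show the validation side:

package example

import (
	"bytes"
	"context"
	"fmt"

	"github.com/bufbuild/buf/private/bufpkg/bufmanifest"
	"github.com/bufbuild/buf/private/pkg/manifest"
)

func roundTrip(ctx context.Context, content []byte) error {
	// Compute the shake256 digest of the content (NewDigester is an assumed
	// helper from the manifest package, not shown in this diff).
	digester, err := manifest.NewDigester(manifest.DigestTypeShake256)
	if err != nil {
		return err
	}
	digest, err := digester.Digest(bytes.NewReader(content))
	if err != nil {
		return err
	}
	blob, err := manifest.NewMemoryBlob(*digest, content, manifest.MemoryBlobWithDigestValidation())
	if err != nil {
		return err
	}
	// manifest.Blob -> proto: AsProtoBlob reads the content and pairs it with
	// the digest bytes.
	protoBlob, err := bufmanifest.AsProtoBlob(ctx, blob)
	if err != nil {
		return err
	}
	// proto -> manifest.Blob: NewBlobFromProto re-validates the content
	// against the digest, so corruption is caught at the boundary.
	back, err := bufmanifest.NewBlobFromProto(protoBlob)
	if err != nil {
		return err
	}
	fmt.Println(back.Digest().String())
	return nil
}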
diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/module_bucket_builder.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/module_bucket_builder.go index 7cf7ee2fc1..c13f173ac2 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/module_bucket_builder.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild/module_bucket_builder.go @@ -21,55 +21,64 @@ import ( "github.com/bufbuild/buf/private/bufpkg/buflock" "github.com/bufbuild/buf/private/bufpkg/bufmodule" "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleconfig" - "github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref" "github.com/bufbuild/buf/private/pkg/normalpath" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storagemem" - "go.uber.org/zap" ) +// BuiltModule ties a bufmodule.Module with the configuration and a bucket +// containing just the files required to build it. +type BuiltModule struct { + bufmodule.Module + Bucket storage.ReadBucket +} + type moduleBucketBuilder struct { - logger *zap.Logger + opt buildOptions } func newModuleBucketBuilder( - logger *zap.Logger, + options ...BuildOption, ) *moduleBucketBuilder { - return &moduleBucketBuilder{ - logger: logger, + opt := buildOptions{} + for _, option := range options { + option(&opt) } + return &moduleBucketBuilder{opt: opt} } -func (b *moduleBucketBuilder) BuildForBucket( +// BuildForBucket is an alternative constructor of NewModuleBucketBuilder +// followed by calling the BuildForBucket method. +func BuildForBucket( ctx context.Context, readBucket storage.ReadBucket, config *bufmoduleconfig.Config, options ...BuildOption, -) (bufmodule.Module, error) { - buildOptions := &buildOptions{} - for _, option := range options { - option(buildOptions) - } - return b.buildForBucket( +) (*BuiltModule, error) { + builder := newModuleBucketBuilder(options...) + bm, err := builder.BuildForBucket( ctx, readBucket, config, - buildOptions.moduleIdentity, - buildOptions.paths, - buildOptions.excludePaths, - buildOptions.pathsAllowNotExist, ) + if err != nil { + return nil, err + } + return bm, nil } -func (b *moduleBucketBuilder) buildForBucket( +// BuildForBucket constructs a minimal bucket from the passed readBucket and +// builds a module from it. +// +// config's value is used even if the bucket contains configuration (buf.yaml). +// This means the module is built differently than described in storage, which +// may cause building to fail or succeed when it shouldn't. For your own +// sanity, you should pass a config value read from the provided bucket. +func (b *moduleBucketBuilder) BuildForBucket( ctx context.Context, readBucket storage.ReadBucket, config *bufmoduleconfig.Config, - moduleIdentity bufmoduleref.ModuleIdentity, - bucketRelPaths *[]string, - excludeRelPaths []string, - bucketRelPathsAllowNotExist bool, -) (bufmodule.Module, error) { +) (*BuiltModule, error) { // proxy plain files externalPaths := []string{ buflock.ExternalConfigFilePath, @@ -122,22 +131,32 @@ func (b *moduleBucketBuilder) buildForBucket( ), ) } + bucket := storage.MultiReadBucket(rootBuckets...) 
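Zooming out from this hunk: the exported BuildForBucket wrapper and the new BuiltModule pair are the public surface of the refactor. A minimal sketch of a hypothetical caller follows; the names are invented, and per the doc comment the config should be read from the same bucket's buf.yaml:

package example

import (
	"context"

	"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmodulebuild"
	"github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleconfig"
	"github.com/bufbuild/buf/private/pkg/storage"
)

func buildModule(
	ctx context.Context,
	readBucket storage.ReadBucket,
	config *bufmoduleconfig.Config, // derived from this bucket's own buf.yaml
) (*bufmodulebuild.BuiltModule, error) {
	built, err := bufmodulebuild.BuildForBucket(ctx, readBucket, config)
	if err != nil {
		return nil, err
	}
	// built.Module is the parsed module; built.Bucket contains only the files
	// required to build it, which is what the manifest+blobs encoding uses.
	return built, nil
}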
module, err := bufmodule.NewModuleForBucket( ctx, - storage.MultiReadBucket(rootBuckets...), - bufmodule.ModuleWithModuleIdentity(moduleIdentity /* This may be nil */), + bucket, + bufmodule.ModuleWithModuleIdentity( + b.opt.moduleIdentity, // This may be nil + ), ) if err != nil { return nil, err } - return applyModulePaths( + appliedModule, err := applyModulePaths( module, roots, - bucketRelPaths, - excludeRelPaths, - bucketRelPathsAllowNotExist, + b.opt.paths, + b.opt.excludePaths, + b.opt.pathsAllowNotExist, normalpath.Relative, ) + if err != nil { + return nil, err + } + return &BuiltModule{ + Module: appliedModule, + Bucket: bucket, + }, nil } // may return nil. diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/bufmoduleref.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/bufmoduleref.go index 30bd0b0dc9..5848e193ed 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/bufmoduleref.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/bufmoduleref.go @@ -277,6 +277,7 @@ type ModulePin interface { // all of these will be set Branch() string Commit() string + Digest() string CreateTime() time.Time isModulePin() @@ -289,9 +290,10 @@ func NewModulePin( repository string, branch string, commit string, + digest string, createTime time.Time, ) (ModulePin, error) { - return newModulePin(remote, owner, repository, branch, commit, createTime) + return newModulePin(remote, owner, repository, branch, commit, digest, createTime) } // NewModulePinForProto returns a new ModulePin for the given proto ModulePin. @@ -389,6 +391,7 @@ func ModulePinEqual(a ModulePin, b ModulePin) bool { a.Repository() == b.Repository() && a.Branch() == b.Branch() && a.Commit() == b.Commit() && + a.Digest() == b.Digest() && a.CreateTime().Equal(b.CreateTime()) } @@ -409,6 +412,7 @@ func DependencyModulePinsForBucket( dep.Repository, "", dep.Commit, + dep.Digest, time.Time{}, ) if err != nil { @@ -445,6 +449,7 @@ func PutDependencyModulePinsToBucket( Owner: pin.Owner(), Repository: pin.Repository(), Commit: pin.Commit(), + Digest: pin.Digest(), }, ) } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/module_pin.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/module_pin.go index a0fb837da7..21007c23aa 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/module_pin.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/module_pin.go @@ -28,6 +28,7 @@ type modulePin struct { repository string branch string commit string + digest string createTime time.Time } @@ -37,6 +38,7 @@ func newModulePin( repository string, branch string, commit string, + digest string, createTime time.Time, ) (*modulePin, error) { protoCreateTime, err := prototime.NewTimestamp(createTime) @@ -45,12 +47,13 @@ func newModulePin( } return newModulePinForProto( &modulev1alpha1.ModulePin{ - Remote: remote, - Owner: owner, - Repository: repository, - Branch: branch, - Commit: commit, - CreateTime: protoCreateTime, + Remote: remote, + Owner: owner, + Repository: repository, + Branch: branch, + Commit: commit, + ManifestDigest: digest, + CreateTime: protoCreateTime, }, ) } @@ -67,6 +70,7 @@ func newModulePinForProto( repository: protoModulePin.Repository, branch: protoModulePin.Branch, commit: protoModulePin.Commit, + digest: protoModulePin.ManifestDigest, createTime: protoModulePin.CreateTime.AsTime(), }, nil } @@ -75,11 +79,12 @@ func 
newProtoModulePinForModulePin( modulePin ModulePin, ) *modulev1alpha1.ModulePin { return &modulev1alpha1.ModulePin{ - Remote: modulePin.Remote(), - Owner: modulePin.Owner(), - Repository: modulePin.Repository(), - Branch: modulePin.Branch(), - Commit: modulePin.Commit(), + Remote: modulePin.Remote(), + Owner: modulePin.Owner(), + Repository: modulePin.Repository(), + Branch: modulePin.Branch(), + Commit: modulePin.Commit(), + ManifestDigest: modulePin.Digest(), // no need to validate as we already know this is valid CreateTime: timestamppb.New(modulePin.CreateTime()), } @@ -105,6 +110,10 @@ func (m *modulePin) Commit() string { return m.commit } +func (m *modulePin) Digest() string { + return m.digest +} + func (m *modulePin) CreateTime() time.Time { return m.createTime } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/util.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/util.go index ef12c6156d..07776b76dd 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/util.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufmodule/bufmoduleref/util.go @@ -115,6 +115,12 @@ func modulePinCompareTo(a ModulePin, b ModulePin) int { if a.Commit() > b.Commit() { return 1 } + if a.Digest() < b.Digest() { + return -1 + } + if a.Digest() > b.Digest() { + return 1 + } if a.CreateTime().Before(b.CreateTime()) { return -1 } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufplugin.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufplugin.go index 55e93b8a1a..f10579c4b5 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufplugin.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufplugin.go @@ -72,6 +72,8 @@ func PluginToProtoPluginRegistryType(plugin Plugin) registryv1alpha1.PluginRegis registryType = registryv1alpha1.PluginRegistryType_PLUGIN_REGISTRY_TYPE_GO } else if plugin.Registry().NPM != nil { registryType = registryv1alpha1.PluginRegistryType_PLUGIN_REGISTRY_TYPE_NPM + } else if plugin.Registry().Maven != nil { + registryType = registryv1alpha1.PluginRegistryType_PLUGIN_REGISTRY_TYPE_MAVEN } } return registryType @@ -141,6 +143,17 @@ func PluginRegistryToProtoRegistryConfig(pluginRegistry *bufpluginconfig.Registr } } registryConfig.RegistryConfig = ®istryv1alpha1.RegistryConfig_NpmConfig{NpmConfig: npmConfig} + } else if pluginRegistry.Maven != nil { + mavenConfig := ®istryv1alpha1.MavenConfig{} + if pluginRegistry.Maven.Deps != nil { + mavenConfig.RuntimeLibraries = make([]*registryv1alpha1.MavenConfig_RuntimeLibrary, 0, len(pluginRegistry.Maven.Deps)) + for _, gav := range pluginRegistry.Maven.Deps { + mavenConfig.RuntimeLibraries = append(mavenConfig.RuntimeLibraries, ®istryv1alpha1.MavenConfig_RuntimeLibrary{ + Gav: gav, + }) + } + } + registryConfig.RegistryConfig = ®istryv1alpha1.RegistryConfig_MavenConfig{MavenConfig: mavenConfig} } return registryConfig, nil } @@ -181,6 +194,16 @@ func ProtoRegistryConfigToPluginRegistry(config *registryv1alpha1.RegistryConfig } } registryConfig.NPM = npmConfig + } else if config.GetMavenConfig() != nil { + mavenConfig := &bufpluginconfig.MavenRegistryConfig{} + runtimeLibraries := config.GetMavenConfig().GetRuntimeLibraries() + if runtimeLibraries != nil { + mavenConfig.Deps = make([]string, 0, len(runtimeLibraries)) + for _, library := range runtimeLibraries { + mavenConfig.Deps = append(mavenConfig.Deps, library.Gav) + } + } + registryConfig.Maven = mavenConfig } return registryConfig, nil } diff 
--git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/bufpluginconfig.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/bufpluginconfig.go index b4b48907da..e258fdfada 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/bufpluginconfig.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/bufpluginconfig.go @@ -84,8 +84,9 @@ type Config struct { // // Only one field will be set. type RegistryConfig struct { - Go *GoRegistryConfig - NPM *NPMRegistryConfig + Go *GoRegistryConfig + NPM *NPMRegistryConfig + Maven *MavenRegistryConfig // Options is the set of options passed into the plugin for the // remote registry. // @@ -130,6 +131,14 @@ type NPMRegistryDependencyConfig struct { Version string } +// MavenRegistryConfig is the registry configuration for a Maven plugin. +type MavenRegistryConfig struct { + // Deps is a slice of GAV strings. + // A GAV is the groupId:artifactId:version of the dependency. + // See https://maven.apache.org/repositories/artifacts.html for more information. + Deps []string +} + // ConfigOption is an optional option used when loading a Config. type ConfigOption func(*configOptions) @@ -254,9 +263,10 @@ type ExternalDependency struct { // ExternalRegistryConfig is the external configuration for the registry // of a plugin. type ExternalRegistryConfig struct { - Go *ExternalGoRegistryConfig `json:"go,omitempty" yaml:"go,omitempty"` - NPM *ExternalNPMRegistryConfig `json:"npm,omitempty" yaml:"npm,omitempty"` - Opts []string `json:"opts,omitempty" yaml:"opts,omitempty"` + Go *ExternalGoRegistryConfig `json:"go,omitempty" yaml:"go,omitempty"` + NPM *ExternalNPMRegistryConfig `json:"npm,omitempty" yaml:"npm,omitempty"` + Maven *ExternalMavenRegistryConfig `json:"maven,omitempty" yaml:"maven,omitempty"` + Opts []string `json:"opts,omitempty" yaml:"opts,omitempty"` } // ExternalGoRegistryConfig is the external registry configuration for a Go plugin. @@ -281,6 +291,11 @@ type ExternalNPMRegistryConfig struct { ImportStyle string `json:"import_style,omitempty" yaml:"import_style,omitempty"` } +// ExternalMavenRegistryConfig is the external registry configuration for a Maven plugin. 
+type ExternalMavenRegistryConfig struct { + Deps []string `json:"deps,omitempty" yaml:"deps,omitempty"` +} + type externalConfigVersion struct { Version string `json:"version,omitempty" yaml:"version,omitempty"` } diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/config.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/config.go index bcda1db701..04f0886e2e 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/config.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/config.go @@ -85,13 +85,15 @@ func newConfig(externalConfig ExternalConfig, options []ConfigOption) (*Config, func newRegistryConfig(externalRegistryConfig ExternalRegistryConfig) (*RegistryConfig, error) { var ( - isGoEmpty = externalRegistryConfig.Go == nil - isNPMEmpty = externalRegistryConfig.NPM == nil + isGoEmpty = externalRegistryConfig.Go == nil + isNPMEmpty = externalRegistryConfig.NPM == nil + isMavenEmpty = externalRegistryConfig.Maven == nil ) var registryCount int for _, isEmpty := range []bool{ isGoEmpty, isNPMEmpty, + isMavenEmpty, } { if !isEmpty { registryCount++ @@ -116,6 +118,15 @@ func newRegistryConfig(externalRegistryConfig ExternalRegistryConfig) (*Registry NPM: npmRegistryConfig, Options: options, }, nil + } else if !isMavenEmpty { + mavenRegistryConfig, err := newMavenRegistryConfig(externalRegistryConfig.Maven) + if err != nil { + return nil, err + } + return &RegistryConfig{ + Maven: mavenRegistryConfig, + Options: options, + }, nil } // At this point, the Go runtime is guaranteed to be specified. Note // that this will change if/when there are more runtime languages supported. @@ -202,6 +213,22 @@ func newGoRegistryConfig(externalGoRegistryConfig *ExternalGoRegistryConfig) (*G }, nil } +func newMavenRegistryConfig(externalMavenRegistryConfig *ExternalMavenRegistryConfig) (*MavenRegistryConfig, error) { + if externalMavenRegistryConfig == nil { + return nil, nil + } + var dependencies []string + for _, dep := range externalMavenRegistryConfig.Deps { + if dep == "" { + return nil, errors.New("maven runtime dependency is required to be non-empty") + } + dependencies = append(dependencies, dep) + } + return &MavenRegistryConfig{ + Deps: dependencies, + }, nil +} + func pluginIdentityForStringWithOverrideRemote(identityStr string, overrideRemote string) (bufpluginref.PluginIdentity, error) { identity, err := bufpluginref.PluginIdentityForString(identityStr) if err != nil { diff --git a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/get.go b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/get.go index 7c0bccfaa7..61e2095c88 100644 --- a/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/get.go +++ b/vendor/github.com/bufbuild/buf/private/bufpkg/bufplugin/bufpluginconfig/get.go @@ -22,13 +22,20 @@ import ( "github.com/bufbuild/buf/private/pkg/encoding" "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/stringutil" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" "go.uber.org/multierr" ) func getConfigForBucket(ctx context.Context, readBucket storage.ReadBucket, options []ConfigOption) (_ *Config, retErr error) { - ctx, span := trace.StartSpan(ctx, "get_plugin_config") + ctx, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_plugin_config") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + 
span.SetStatus(codes.Error, retErr.Error()) + } + }() // This will be in the order of precedence. var foundConfigFilePaths []string // Go through all valid config file paths and see which ones are present. @@ -73,9 +80,9 @@ func getConfigForBucket(ctx context.Context, readBucket storage.ReadBucket, opti } func getConfigForData(ctx context.Context, data []byte, options []ConfigOption) (*Config, error) { - _, span := trace.StartSpan(ctx, "get_plugin_config_for_data") + _, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "get_plugin_config_for_data") defer span.End() - return getConfigForDataInternal( + config, err := getConfigForDataInternal( ctx, encoding.UnmarshalJSONOrYAMLNonStrict, encoding.UnmarshalJSONOrYAMLStrict, @@ -83,6 +90,11 @@ func getConfigForData(ctx context.Context, data []byte, options []ConfigOption) "Plugin configuration data", options, ) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + return config, err } func getConfigForDataInternal( diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/download.connect.go b/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/download.connect.go index 2ddfd4a9fc..7d67c2fac2 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/download.connect.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/download.connect.go @@ -41,8 +41,11 @@ const ( // DownloadServiceClient is a client for the buf.alpha.registry.v1alpha1.DownloadService service. type DownloadServiceClient interface { - // Download downloads. + // Download downloads a BSR module. + // NOTE: Newer clients should use DownloadManifestAndBlobs instead. Download(context.Context, *connect_go.Request[v1alpha1.DownloadRequest]) (*connect_go.Response[v1alpha1.DownloadResponse], error) + // DownloadManifestAndBlobs downloads a module in the manifest+blobs encoding format. + DownloadManifestAndBlobs(context.Context, *connect_go.Request[v1alpha1.DownloadManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.DownloadManifestAndBlobsResponse], error) } // NewDownloadServiceClient constructs a client for the buf.alpha.registry.v1alpha1.DownloadService @@ -60,12 +63,18 @@ func NewDownloadServiceClient(httpClient connect_go.HTTPClient, baseURL string, baseURL+"/buf.alpha.registry.v1alpha1.DownloadService/Download", opts..., ), + downloadManifestAndBlobs: connect_go.NewClient[v1alpha1.DownloadManifestAndBlobsRequest, v1alpha1.DownloadManifestAndBlobsResponse]( + httpClient, + baseURL+"/buf.alpha.registry.v1alpha1.DownloadService/DownloadManifestAndBlobs", + opts..., + ), } } // downloadServiceClient implements DownloadServiceClient. type downloadServiceClient struct { - download *connect_go.Client[v1alpha1.DownloadRequest, v1alpha1.DownloadResponse] + download *connect_go.Client[v1alpha1.DownloadRequest, v1alpha1.DownloadResponse] + downloadManifestAndBlobs *connect_go.Client[v1alpha1.DownloadManifestAndBlobsRequest, v1alpha1.DownloadManifestAndBlobsResponse] } // Download calls buf.alpha.registry.v1alpha1.DownloadService.Download. @@ -73,11 +82,20 @@ func (c *downloadServiceClient) Download(ctx context.Context, req *connect_go.Re return c.download.CallUnary(ctx, req) } +// DownloadManifestAndBlobs calls +// buf.alpha.registry.v1alpha1.DownloadService.DownloadManifestAndBlobs. 
+func (c *downloadServiceClient) DownloadManifestAndBlobs(ctx context.Context, req *connect_go.Request[v1alpha1.DownloadManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.DownloadManifestAndBlobsResponse], error) { + return c.downloadManifestAndBlobs.CallUnary(ctx, req) +} + // DownloadServiceHandler is an implementation of the buf.alpha.registry.v1alpha1.DownloadService // service. type DownloadServiceHandler interface { - // Download downloads. + // Download downloads a BSR module. + // NOTE: Newer clients should use DownloadManifestAndBlobs instead. Download(context.Context, *connect_go.Request[v1alpha1.DownloadRequest]) (*connect_go.Response[v1alpha1.DownloadResponse], error) + // DownloadManifestAndBlobs downloads a module in the manifest+blobs encoding format. + DownloadManifestAndBlobs(context.Context, *connect_go.Request[v1alpha1.DownloadManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.DownloadManifestAndBlobsResponse], error) } // NewDownloadServiceHandler builds an HTTP handler from the service implementation. It returns the @@ -92,6 +110,11 @@ func NewDownloadServiceHandler(svc DownloadServiceHandler, opts ...connect_go.Ha svc.Download, opts..., )) + mux.Handle("/buf.alpha.registry.v1alpha1.DownloadService/DownloadManifestAndBlobs", connect_go.NewUnaryHandler( + "/buf.alpha.registry.v1alpha1.DownloadService/DownloadManifestAndBlobs", + svc.DownloadManifestAndBlobs, + opts..., + )) return "/buf.alpha.registry.v1alpha1.DownloadService/", mux } @@ -101,3 +124,7 @@ type UnimplementedDownloadServiceHandler struct{} func (UnimplementedDownloadServiceHandler) Download(context.Context, *connect_go.Request[v1alpha1.DownloadRequest]) (*connect_go.Response[v1alpha1.DownloadResponse], error) { return nil, connect_go.NewError(connect_go.CodeUnimplemented, errors.New("buf.alpha.registry.v1alpha1.DownloadService.Download is not implemented")) } + +func (UnimplementedDownloadServiceHandler) DownloadManifestAndBlobs(context.Context, *connect_go.Request[v1alpha1.DownloadManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.DownloadManifestAndBlobsResponse], error) { + return nil, connect_go.NewError(connect_go.CodeUnimplemented, errors.New("buf.alpha.registry.v1alpha1.DownloadService.DownloadManifestAndBlobs is not implemented")) +} diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/push.connect.go b/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/push.connect.go index 6cf6927ce7..614c0d7574 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/push.connect.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/push.connect.go @@ -42,7 +42,10 @@ const ( // PushServiceClient is a client for the buf.alpha.registry.v1alpha1.PushService service. type PushServiceClient interface { // Push pushes. + // NOTE: Newer clients should use PushManifestAndBlobs. Push(context.Context, *connect_go.Request[v1alpha1.PushRequest]) (*connect_go.Response[v1alpha1.PushResponse], error) + // PushManifestAndBlobs pushes a module by encoding it in a manifest and blobs format. + PushManifestAndBlobs(context.Context, *connect_go.Request[v1alpha1.PushManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.PushManifestAndBlobsResponse], error) } // NewPushServiceClient constructs a client for the buf.alpha.registry.v1alpha1.PushService service. 
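The DownloadManifestAndBlobs RPC introduced above is what feeds bufmanifest.NewBucketFromManifestBlobs end to end. As a hedged sketch of the download side (the base URL and module coordinates are illustrative, and the client wiring is an assumption, not taken from this diff):

package example

import (
	"context"
	"net/http"

	"github.com/bufbuild/buf/private/bufpkg/bufmanifest"
	"github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect"
	registryv1alpha1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1"
	"github.com/bufbuild/buf/private/pkg/storage"
	connect_go "github.com/bufbuild/connect-go"
)

func downloadModule(ctx context.Context) (storage.ReadBucket, error) {
	client := registryv1alpha1connect.NewDownloadServiceClient(
		http.DefaultClient,
		"https://buf.build", // illustrative registry base URL
	)
	res, err := client.DownloadManifestAndBlobs(ctx, connect_go.NewRequest(
		&registryv1alpha1.DownloadManifestAndBlobsRequest{
			Owner:      "acme", // hypothetical module coordinates
			Repository: "petapis",
			Reference:  "main",
		},
	))
	if err != nil {
		return nil, err
	}
	// The response carries a manifest blob plus the content blobs;
	// NewBucketFromManifestBlobs re-validates digests and completeness before
	// exposing them as a read bucket.
	return bufmanifest.NewBucketFromManifestBlobs(ctx, res.Msg.Manifest, res.Msg.Blobs)
}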
@@ -60,12 +63,18 @@ func NewPushServiceClient(httpClient connect_go.HTTPClient, baseURL string, opts baseURL+"/buf.alpha.registry.v1alpha1.PushService/Push", opts..., ), + pushManifestAndBlobs: connect_go.NewClient[v1alpha1.PushManifestAndBlobsRequest, v1alpha1.PushManifestAndBlobsResponse]( + httpClient, + baseURL+"/buf.alpha.registry.v1alpha1.PushService/PushManifestAndBlobs", + opts..., + ), } } // pushServiceClient implements PushServiceClient. type pushServiceClient struct { - push *connect_go.Client[v1alpha1.PushRequest, v1alpha1.PushResponse] + push *connect_go.Client[v1alpha1.PushRequest, v1alpha1.PushResponse] + pushManifestAndBlobs *connect_go.Client[v1alpha1.PushManifestAndBlobsRequest, v1alpha1.PushManifestAndBlobsResponse] } // Push calls buf.alpha.registry.v1alpha1.PushService.Push. @@ -73,10 +82,18 @@ func (c *pushServiceClient) Push(ctx context.Context, req *connect_go.Request[v1 return c.push.CallUnary(ctx, req) } +// PushManifestAndBlobs calls buf.alpha.registry.v1alpha1.PushService.PushManifestAndBlobs. +func (c *pushServiceClient) PushManifestAndBlobs(ctx context.Context, req *connect_go.Request[v1alpha1.PushManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.PushManifestAndBlobsResponse], error) { + return c.pushManifestAndBlobs.CallUnary(ctx, req) +} + // PushServiceHandler is an implementation of the buf.alpha.registry.v1alpha1.PushService service. type PushServiceHandler interface { // Push pushes. + // NOTE: Newer clients should use PushManifestAndBlobs. Push(context.Context, *connect_go.Request[v1alpha1.PushRequest]) (*connect_go.Response[v1alpha1.PushResponse], error) + // PushManifestAndBlobs pushes a module by encoding it in a manifest and blobs format. + PushManifestAndBlobs(context.Context, *connect_go.Request[v1alpha1.PushManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.PushManifestAndBlobsResponse], error) } // NewPushServiceHandler builds an HTTP handler from the service implementation. 
It returns the path @@ -91,6 +108,11 @@ func NewPushServiceHandler(svc PushServiceHandler, opts ...connect_go.HandlerOpt svc.Push, opts..., )) + mux.Handle("/buf.alpha.registry.v1alpha1.PushService/PushManifestAndBlobs", connect_go.NewUnaryHandler( + "/buf.alpha.registry.v1alpha1.PushService/PushManifestAndBlobs", + svc.PushManifestAndBlobs, + opts..., + )) return "/buf.alpha.registry.v1alpha1.PushService/", mux } @@ -100,3 +122,7 @@ type UnimplementedPushServiceHandler struct{} func (UnimplementedPushServiceHandler) Push(context.Context, *connect_go.Request[v1alpha1.PushRequest]) (*connect_go.Response[v1alpha1.PushResponse], error) { return nil, connect_go.NewError(connect_go.CodeUnimplemented, errors.New("buf.alpha.registry.v1alpha1.PushService.Push is not implemented")) } + +func (UnimplementedPushServiceHandler) PushManifestAndBlobs(context.Context, *connect_go.Request[v1alpha1.PushManifestAndBlobsRequest]) (*connect_go.Response[v1alpha1.PushManifestAndBlobsResponse], error) { + return nil, connect_go.NewError(connect_go.CodeUnimplemented, errors.New("buf.alpha.registry.v1alpha1.PushService.PushManifestAndBlobs is not implemented")) +} diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/recommendation.connect.go b/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/recommendation.connect.go index d333637563..a986840882 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/recommendation.connect.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/connect/buf/alpha/registry/v1alpha1/registryv1alpha1connect/recommendation.connect.go @@ -50,7 +50,7 @@ type RecommendationServiceClient interface { RecommendedTemplates(context.Context, *connect_go.Request[v1alpha1.RecommendedTemplatesRequest]) (*connect_go.Response[v1alpha1.RecommendedTemplatesResponse], error) // ListRecommendedResources returns a list of recommended resources. ListRecommendedResources(context.Context, *connect_go.Request[v1alpha1.ListRecommendedResourcesRequest]) (*connect_go.Response[v1alpha1.ListRecommendedResourcesResponse], error) - // SetRecommendedResources set the list of recommendated resources in the server. + // SetRecommendedResources set the list of recommended resources in the server. SetRecommendedResources(context.Context, *connect_go.Request[v1alpha1.SetRecommendedResourcesRequest]) (*connect_go.Response[v1alpha1.SetRecommendedResourcesResponse], error) } @@ -133,7 +133,7 @@ type RecommendationServiceHandler interface { RecommendedTemplates(context.Context, *connect_go.Request[v1alpha1.RecommendedTemplatesRequest]) (*connect_go.Response[v1alpha1.RecommendedTemplatesResponse], error) // ListRecommendedResources returns a list of recommended resources. ListRecommendedResources(context.Context, *connect_go.Request[v1alpha1.ListRecommendedResourcesRequest]) (*connect_go.Response[v1alpha1.ListRecommendedResourcesResponse], error) - // SetRecommendedResources set the list of recommendated resources in the server. + // SetRecommendedResources set the list of recommended resources in the server. 
SetRecommendedResources(context.Context, *connect_go.Request[v1alpha1.SetRecommendedResourcesRequest]) (*connect_go.Response[v1alpha1.SetRecommendedResourcesResponse], error) } diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/module/v1alpha1/module.pb.go b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/module/v1alpha1/module.pb.go index 26367e1144..e5a0a632ea 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/module/v1alpha1/module.pb.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/module/v1alpha1/module.pb.go @@ -37,6 +37,168 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type DigestType int32 + +const ( + DigestType_DIGEST_TYPE_UNSPECIFIED DigestType = 0 + DigestType_DIGEST_TYPE_SHAKE256 DigestType = 1 +) + +// Enum value maps for DigestType. +var ( + DigestType_name = map[int32]string{ + 0: "DIGEST_TYPE_UNSPECIFIED", + 1: "DIGEST_TYPE_SHAKE256", + } + DigestType_value = map[string]int32{ + "DIGEST_TYPE_UNSPECIFIED": 0, + "DIGEST_TYPE_SHAKE256": 1, + } +) + +func (x DigestType) Enum() *DigestType { + p := new(DigestType) + *p = x + return p +} + +func (x DigestType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DigestType) Descriptor() protoreflect.EnumDescriptor { + return file_buf_alpha_module_v1alpha1_module_proto_enumTypes[0].Descriptor() +} + +func (DigestType) Type() protoreflect.EnumType { + return &file_buf_alpha_module_v1alpha1_module_proto_enumTypes[0] +} + +func (x DigestType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DigestType.Descriptor instead. +func (DigestType) EnumDescriptor() ([]byte, []int) { + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{0} +} + +// Digest represents a hash function's value. +type Digest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // digest_type describes the hash algorithm. e.g. "SHAKE256" + DigestType DigestType `protobuf:"varint,1,opt,name=digest_type,json=digestType,proto3,enum=buf.alpha.module.v1alpha1.DigestType" json:"digest_type,omitempty"` + // digest is the hash's output without encoding. + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` +} + +func (x *Digest) Reset() { + *x = Digest{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Digest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Digest) ProtoMessage() {} + +func (x *Digest) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Digest.ProtoReflect.Descriptor instead. +func (*Digest) Descriptor() ([]byte, []int) { + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{0} +} + +func (x *Digest) GetDigestType() DigestType { + if x != nil { + return x.DigestType + } + return DigestType_DIGEST_TYPE_UNSPECIFIED +} + +func (x *Digest) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +// Blob represents some module content with an associated hash. 
+type Blob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Digest of the content. + Digest *Digest `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + // Content of the blob. Optional when Blob is used as a content pointer. + Content []byte `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` +} + +func (x *Blob) Reset() { + *x = Blob{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Blob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Blob) ProtoMessage() {} + +func (x *Blob) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Blob.ProtoReflect.Descriptor instead. +func (*Blob) Descriptor() ([]byte, []int) { + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{1} +} + +func (x *Blob) GetDigest() *Digest { + if x != nil { + return x.Digest + } + return nil +} + +func (x *Blob) GetContent() []byte { + if x != nil { + return x.Content + } + return nil +} + // Module is a module. type Module struct { state protoimpl.MessageState @@ -70,7 +232,7 @@ type Module struct { func (x *Module) Reset() { *x = Module{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[0] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -83,7 +245,7 @@ func (x *Module) String() string { func (*Module) ProtoMessage() {} func (x *Module) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[0] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -96,7 +258,7 @@ func (x *Module) ProtoReflect() protoreflect.Message { // Deprecated: Use Module.ProtoReflect.Descriptor instead. func (*Module) Descriptor() ([]byte, []int) { - return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{0} + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{2} } func (x *Module) GetFiles() []*ModuleFile { @@ -157,7 +319,7 @@ type ModuleFile struct { func (x *ModuleFile) Reset() { *x = ModuleFile{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[1] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -170,7 +332,7 @@ func (x *ModuleFile) String() string { func (*ModuleFile) ProtoMessage() {} func (x *ModuleFile) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[1] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -183,7 +345,7 @@ func (x *ModuleFile) ProtoReflect() protoreflect.Message { // Deprecated: Use ModuleFile.ProtoReflect.Descriptor instead. 
func (*ModuleFile) Descriptor() ([]byte, []int) { - return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{1} + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{3} } func (x *ModuleFile) GetPath() string { @@ -216,7 +378,7 @@ type ModuleReference struct { func (x *ModuleReference) Reset() { *x = ModuleReference{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[2] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -229,7 +391,7 @@ func (x *ModuleReference) String() string { func (*ModuleReference) ProtoMessage() {} func (x *ModuleReference) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[2] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -242,7 +404,7 @@ func (x *ModuleReference) ProtoReflect() protoreflect.Message { // Deprecated: Use ModuleReference.ProtoReflect.Descriptor instead. func (*ModuleReference) Descriptor() ([]byte, []int) { - return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{2} + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{4} } func (x *ModuleReference) GetRemote() string { @@ -285,12 +447,14 @@ type ModulePin struct { Branch string `protobuf:"bytes,4,opt,name=branch,proto3" json:"branch,omitempty"` Commit string `protobuf:"bytes,5,opt,name=commit,proto3" json:"commit,omitempty"` CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Module's manifest digest. Replacement for previous b1/b3 digests. + ManifestDigest string `protobuf:"bytes,8,opt,name=manifest_digest,json=manifestDigest,proto3" json:"manifest_digest,omitempty"` } func (x *ModulePin) Reset() { *x = ModulePin{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[3] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -303,7 +467,7 @@ func (x *ModulePin) String() string { func (*ModulePin) ProtoMessage() {} func (x *ModulePin) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[3] + mi := &file_buf_alpha_module_v1alpha1_module_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -316,7 +480,7 @@ func (x *ModulePin) ProtoReflect() protoreflect.Message { // Deprecated: Use ModulePin.ProtoReflect.Descriptor instead. 
func (*ModulePin) Descriptor() ([]byte, []int) { - return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{3} + return file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP(), []int{5} } func (x *ModulePin) GetRemote() string { @@ -361,6 +525,13 @@ func (x *ModulePin) GetCreateTime() *timestamppb.Timestamp { return nil } +func (x *ModulePin) GetManifestDigest() string { + if x != nil { + return x.ManifestDigest + } + return "" +} + var File_buf_alpha_module_v1alpha1_module_proto protoreflect.FileDescriptor var file_buf_alpha_module_v1alpha1_module_proto_rawDesc = []byte{ @@ -374,71 +545,90 @@ var file_buf_alpha_module_v1alpha1_module_proto_rawDesc = []byte{ 0x68, 0x61, 0x2f, 0x6c, 0x69, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd3, 0x02, 0x0a, 0x06, 0x4d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, - 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, - 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, - 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x52, 0x0c, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6f, - 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x46, 0x0a, 0x0f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x62, 0x75, 0x66, 0x2e, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x69, - 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x0b, 0x6c, 0x69, 0x6e, 0x74, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6c, 0x69, 0x6e, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x6c, 0x69, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x22, 0x3a, - 0x0a, 0x0a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x7d, 0x0a, 0x0f, 0x4d, 0x6f, - 0x64, 0x75, 0x6c, 0x65, 0x52, 
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x72, - 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x72, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0xd4, 0x01, 0x0a, 0x09, 0x4d, 0x6f, - 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x16, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, - 0x42, 0x8a, 0x02, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x55, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, - 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x4d, 0xaa, 0x02, - 0x19, 0x42, 0x75, 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, - 0x65, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x19, 0x42, 0x75, 0x66, - 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x25, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, - 0x68, 0x61, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x1c, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 
0x68, 0x61, 0x3a, 0x3a, 0x4d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x68, 0x0a, 0x06, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x12, 0x46, 0x0a, 0x0b, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x22, 0x5b, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x39, 0x0a, 0x06, 0x64, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x75, 0x66, + 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, + 0xd3, 0x02, 0x0a, 0x06, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x75, 0x66, 0x2e, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, + 0x52, 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, + 0x50, 0x69, 0x6e, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, + 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46, 0x0a, 0x0f, 0x62, 0x72, 0x65, 0x61, 0x6b, + 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x62, 0x72, 0x65, + 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0e, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x3a, 0x0a, 0x0b, 0x6c, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x6c, 0x69, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x0a, 0x6c, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x6c, + 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x69, + 0x63, 0x65, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x0a, 
0x0a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x22, 0x7d, 0x0a, 0x0f, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, + 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x22, 0xfd, 0x01, 0x0a, 0x09, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, + 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, + 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x72, + 0x61, 0x6e, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, 0x0b, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x61, 0x6e, + 0x69, 0x66, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x2a, 0x43, 0x0a, 0x0a, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, + 0x0a, 0x17, 0x44, 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, + 0x49, 0x47, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x4b, 0x45, + 0x32, 0x35, 0x36, 0x10, 0x01, 0x42, 0x8a, 0x02, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, + 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x55, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 
0x62, 0x75, 0x66, 0x2f, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x6d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x6d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, + 0x42, 0x41, 0x4d, 0xaa, 0x02, 0x19, 0x42, 0x75, 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, + 0x02, 0x19, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x4d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x25, 0x42, 0x75, + 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5c, 0x56, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, + 0x3a, 0x3a, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -453,27 +643,33 @@ func file_buf_alpha_module_v1alpha1_module_proto_rawDescGZIP() []byte { return file_buf_alpha_module_v1alpha1_module_proto_rawDescData } -var file_buf_alpha_module_v1alpha1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_buf_alpha_module_v1alpha1_module_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_buf_alpha_module_v1alpha1_module_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_buf_alpha_module_v1alpha1_module_proto_goTypes = []interface{}{ - (*Module)(nil), // 0: buf.alpha.module.v1alpha1.Module - (*ModuleFile)(nil), // 1: buf.alpha.module.v1alpha1.ModuleFile - (*ModuleReference)(nil), // 2: buf.alpha.module.v1alpha1.ModuleReference - (*ModulePin)(nil), // 3: buf.alpha.module.v1alpha1.ModulePin - (*v1.Config)(nil), // 4: buf.alpha.breaking.v1.Config - (*v11.Config)(nil), // 5: buf.alpha.lint.v1.Config - (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp + (DigestType)(0), // 0: buf.alpha.module.v1alpha1.DigestType + (*Digest)(nil), // 1: buf.alpha.module.v1alpha1.Digest + (*Blob)(nil), // 2: buf.alpha.module.v1alpha1.Blob + (*Module)(nil), // 3: buf.alpha.module.v1alpha1.Module + (*ModuleFile)(nil), // 4: buf.alpha.module.v1alpha1.ModuleFile + (*ModuleReference)(nil), // 5: buf.alpha.module.v1alpha1.ModuleReference + (*ModulePin)(nil), // 6: buf.alpha.module.v1alpha1.ModulePin + (*v1.Config)(nil), // 7: buf.alpha.breaking.v1.Config + (*v11.Config)(nil), // 8: buf.alpha.lint.v1.Config + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp } var file_buf_alpha_module_v1alpha1_module_proto_depIdxs = []int32{ - 1, // 0: buf.alpha.module.v1alpha1.Module.files:type_name -> buf.alpha.module.v1alpha1.ModuleFile - 3, // 1: buf.alpha.module.v1alpha1.Module.dependencies:type_name -> buf.alpha.module.v1alpha1.ModulePin - 4, // 2: buf.alpha.module.v1alpha1.Module.breaking_config:type_name -> buf.alpha.breaking.v1.Config - 5, // 3: buf.alpha.module.v1alpha1.Module.lint_config:type_name -> buf.alpha.lint.v1.Config - 6, // 4: buf.alpha.module.v1alpha1.ModulePin.create_time:type_name -> google.protobuf.Timestamp - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // 
[5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 0, // 0: buf.alpha.module.v1alpha1.Digest.digest_type:type_name -> buf.alpha.module.v1alpha1.DigestType + 1, // 1: buf.alpha.module.v1alpha1.Blob.digest:type_name -> buf.alpha.module.v1alpha1.Digest + 4, // 2: buf.alpha.module.v1alpha1.Module.files:type_name -> buf.alpha.module.v1alpha1.ModuleFile + 6, // 3: buf.alpha.module.v1alpha1.Module.dependencies:type_name -> buf.alpha.module.v1alpha1.ModulePin + 7, // 4: buf.alpha.module.v1alpha1.Module.breaking_config:type_name -> buf.alpha.breaking.v1.Config + 8, // 5: buf.alpha.module.v1alpha1.Module.lint_config:type_name -> buf.alpha.lint.v1.Config + 9, // 6: buf.alpha.module.v1alpha1.ModulePin.create_time:type_name -> google.protobuf.Timestamp + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_buf_alpha_module_v1alpha1_module_proto_init() } @@ -483,7 +679,7 @@ func file_buf_alpha_module_v1alpha1_module_proto_init() { } if !protoimpl.UnsafeEnabled { file_buf_alpha_module_v1alpha1_module_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Module); i { + switch v := v.(*Digest); i { case 0: return &v.state case 1: @@ -495,7 +691,7 @@ func file_buf_alpha_module_v1alpha1_module_proto_init() { } } file_buf_alpha_module_v1alpha1_module_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModuleFile); i { + switch v := v.(*Blob); i { case 0: return &v.state case 1: @@ -507,7 +703,7 @@ func file_buf_alpha_module_v1alpha1_module_proto_init() { } } file_buf_alpha_module_v1alpha1_module_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModuleReference); i { + switch v := v.(*Module); i { case 0: return &v.state case 1: @@ -519,6 +715,30 @@ func file_buf_alpha_module_v1alpha1_module_proto_init() { } } file_buf_alpha_module_v1alpha1_module_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModuleFile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_buf_alpha_module_v1alpha1_module_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModuleReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_buf_alpha_module_v1alpha1_module_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ModulePin); i { case 0: return &v.state @@ -536,13 +756,14 @@ func file_buf_alpha_module_v1alpha1_module_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_buf_alpha_module_v1alpha1_module_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, + NumEnums: 1, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, GoTypes: file_buf_alpha_module_v1alpha1_module_proto_goTypes, DependencyIndexes: file_buf_alpha_module_v1alpha1_module_proto_depIdxs, + EnumInfos: file_buf_alpha_module_v1alpha1_module_proto_enumTypes, MessageInfos: file_buf_alpha_module_v1alpha1_module_proto_msgTypes, }.Build() File_buf_alpha_module_v1alpha1_module_proto = out.File diff --git 
a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/download.pb.go b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/download.pb.go index 4cb76ab475..9770f23142 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/download.pb.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/download.pb.go @@ -35,6 +35,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// DownloadRequest specifies the module to download. type DownloadRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -98,6 +99,7 @@ func (x *DownloadRequest) GetReference() string { return "" } +// DownloadResponse contains the remote module. type DownloadResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -145,6 +147,129 @@ func (x *DownloadResponse) GetModule() *v1alpha1.Module { return nil } +// DownloadManifestAndBlobsRequest specifies the module to download. +type DownloadManifestAndBlobsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` + Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` + Reference string `protobuf:"bytes,3,opt,name=reference,proto3" json:"reference,omitempty"` +} + +func (x *DownloadManifestAndBlobsRequest) Reset() { + *x = DownloadManifestAndBlobsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_registry_v1alpha1_download_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownloadManifestAndBlobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownloadManifestAndBlobsRequest) ProtoMessage() {} + +func (x *DownloadManifestAndBlobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_registry_v1alpha1_download_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownloadManifestAndBlobsRequest.ProtoReflect.Descriptor instead. +func (*DownloadManifestAndBlobsRequest) Descriptor() ([]byte, []int) { + return file_buf_alpha_registry_v1alpha1_download_proto_rawDescGZIP(), []int{2} +} + +func (x *DownloadManifestAndBlobsRequest) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *DownloadManifestAndBlobsRequest) GetRepository() string { + if x != nil { + return x.Repository + } + return "" +} + +func (x *DownloadManifestAndBlobsRequest) GetReference() string { + if x != nil { + return x.Reference + } + return "" +} + +// DownloadManifestAndBlobsResponse is the returned resolved remote module. +type DownloadManifestAndBlobsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // manifest is the manifest of the module's content. + Manifest *v1alpha1.Blob `protobuf:"bytes,1,opt,name=manifest,proto3" json:"manifest,omitempty"` + // blobs is a set of blobs that closes on the module's manifest to form the + // complete module's content. 
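// (Editor's note, an illustrative sketch rather than part of the generated
// file: the manifest is itself a Blob, and the blobs returned alongside it
// are the ones the manifest references, so together they reconstruct the
// complete module content. Using only the accessors generated in this
// change, a caller might gather all of them as:
//
//	func allBlobs(resp *DownloadManifestAndBlobsResponse) []*v1alpha1.Blob {
//		// Start with the manifest blob, then append the content blobs.
//		return append([]*v1alpha1.Blob{resp.GetManifest()}, resp.GetBlobs()...)
//	}
//
// How the RPC itself is issued, e.g. through a generated client, is outside
// this file and omitted here.)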
+ Blobs []*v1alpha1.Blob `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"` +} + +func (x *DownloadManifestAndBlobsResponse) Reset() { + *x = DownloadManifestAndBlobsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_registry_v1alpha1_download_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DownloadManifestAndBlobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DownloadManifestAndBlobsResponse) ProtoMessage() {} + +func (x *DownloadManifestAndBlobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_registry_v1alpha1_download_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DownloadManifestAndBlobsResponse.ProtoReflect.Descriptor instead. +func (*DownloadManifestAndBlobsResponse) Descriptor() ([]byte, []int) { + return file_buf_alpha_registry_v1alpha1_download_proto_rawDescGZIP(), []int{3} +} + +func (x *DownloadManifestAndBlobsResponse) GetManifest() *v1alpha1.Blob { + if x != nil { + return x.Manifest + } + return nil +} + +func (x *DownloadManifestAndBlobsResponse) GetBlobs() []*v1alpha1.Blob { + if x != nil { + return x.Blobs + } + return nil +} + var File_buf_alpha_registry_v1alpha1_download_proto protoreflect.FileDescriptor var file_buf_alpha_registry_v1alpha1_download_proto_rawDesc = []byte{ @@ -166,33 +291,60 @@ var file_buf_alpha_registry_v1alpha1_download_proto_rawDesc = []byte{ 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x52, - 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x32, 0x7a, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, 0x6c, - 0x6f, 0x61, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x08, 0x44, 0x6f, - 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2c, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x06, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x75, 0x0a, 0x1f, 0x44, 0x6f, 0x77, 0x6e, 0x6c, + 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, 0x6c, + 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, + 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x22, 0x96, + 0x01, 0x0a, 0x20, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x69, 0x66, + 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3b, 
0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, + 0x12, 0x35, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, + 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, + 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x32, 0x94, 0x02, 0x0a, 0x0f, 0x44, 0x6f, 0x77, 0x6e, + 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x67, 0x0a, 0x08, 0x44, + 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2c, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x18, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, + 0x64, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x62, + 0x73, 0x12, 0x3c, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, + 0x41, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x3d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x6f, + 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, + 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x9a, + 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x9a, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, - 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 
0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, - 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, - 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, - 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, - 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x31, 0x42, 0x0d, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, + 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, + 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -207,21 +359,28 @@ func file_buf_alpha_registry_v1alpha1_download_proto_rawDescGZIP() []byte { return file_buf_alpha_registry_v1alpha1_download_proto_rawDescData } -var file_buf_alpha_registry_v1alpha1_download_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_buf_alpha_registry_v1alpha1_download_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_buf_alpha_registry_v1alpha1_download_proto_goTypes = []interface{}{ - (*DownloadRequest)(nil), // 0: buf.alpha.registry.v1alpha1.DownloadRequest - (*DownloadResponse)(nil), // 1: buf.alpha.registry.v1alpha1.DownloadResponse - (*v1alpha1.Module)(nil), // 2: buf.alpha.module.v1alpha1.Module + (*DownloadRequest)(nil), // 0: buf.alpha.registry.v1alpha1.DownloadRequest + (*DownloadResponse)(nil), // 1: buf.alpha.registry.v1alpha1.DownloadResponse + (*DownloadManifestAndBlobsRequest)(nil), // 2: buf.alpha.registry.v1alpha1.DownloadManifestAndBlobsRequest + (*DownloadManifestAndBlobsResponse)(nil), // 3: 
buf.alpha.registry.v1alpha1.DownloadManifestAndBlobsResponse + (*v1alpha1.Module)(nil), // 4: buf.alpha.module.v1alpha1.Module + (*v1alpha1.Blob)(nil), // 5: buf.alpha.module.v1alpha1.Blob } var file_buf_alpha_registry_v1alpha1_download_proto_depIdxs = []int32{ - 2, // 0: buf.alpha.registry.v1alpha1.DownloadResponse.module:type_name -> buf.alpha.module.v1alpha1.Module - 0, // 1: buf.alpha.registry.v1alpha1.DownloadService.Download:input_type -> buf.alpha.registry.v1alpha1.DownloadRequest - 1, // 2: buf.alpha.registry.v1alpha1.DownloadService.Download:output_type -> buf.alpha.registry.v1alpha1.DownloadResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 4, // 0: buf.alpha.registry.v1alpha1.DownloadResponse.module:type_name -> buf.alpha.module.v1alpha1.Module + 5, // 1: buf.alpha.registry.v1alpha1.DownloadManifestAndBlobsResponse.manifest:type_name -> buf.alpha.module.v1alpha1.Blob + 5, // 2: buf.alpha.registry.v1alpha1.DownloadManifestAndBlobsResponse.blobs:type_name -> buf.alpha.module.v1alpha1.Blob + 0, // 3: buf.alpha.registry.v1alpha1.DownloadService.Download:input_type -> buf.alpha.registry.v1alpha1.DownloadRequest + 2, // 4: buf.alpha.registry.v1alpha1.DownloadService.DownloadManifestAndBlobs:input_type -> buf.alpha.registry.v1alpha1.DownloadManifestAndBlobsRequest + 1, // 5: buf.alpha.registry.v1alpha1.DownloadService.Download:output_type -> buf.alpha.registry.v1alpha1.DownloadResponse + 3, // 6: buf.alpha.registry.v1alpha1.DownloadService.DownloadManifestAndBlobs:output_type -> buf.alpha.registry.v1alpha1.DownloadManifestAndBlobsResponse + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_buf_alpha_registry_v1alpha1_download_proto_init() } @@ -254,6 +413,30 @@ func file_buf_alpha_registry_v1alpha1_download_proto_init() { return nil } } + file_buf_alpha_registry_v1alpha1_download_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadManifestAndBlobsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_buf_alpha_registry_v1alpha1_download_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DownloadManifestAndBlobsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -261,7 +444,7 @@ func file_buf_alpha_registry_v1alpha1_download_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_buf_alpha_registry_v1alpha1_download_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/module.pb.go b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/module.pb.go index c7d3665d97..b7e745802b 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/module.pb.go +++ 
b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/module.pb.go @@ -110,12 +110,14 @@ type LocalModulePin struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` - Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` - Branch string `protobuf:"bytes,3,opt,name=branch,proto3" json:"branch,omitempty"` - Commit string `protobuf:"bytes,4,opt,name=commit,proto3" json:"commit,omitempty"` - CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` - DraftName string `protobuf:"bytes,8,opt,name=draft_name,json=draftName,proto3" json:"draft_name,omitempty"` + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` + Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` + Branch string `protobuf:"bytes,3,opt,name=branch,proto3" json:"branch,omitempty"` + Commit string `protobuf:"bytes,4,opt,name=commit,proto3" json:"commit,omitempty"` + // Module's manifest digest. Replacement for previous b1/b3 digests. + ManifestDigest string `protobuf:"bytes,6,opt,name=manifest_digest,json=manifestDigest,proto3" json:"manifest_digest,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + DraftName string `protobuf:"bytes,8,opt,name=draft_name,json=draftName,proto3" json:"draft_name,omitempty"` } func (x *LocalModulePin) Reset() { @@ -178,6 +180,13 @@ func (x *LocalModulePin) GetCommit() string { return "" } +func (x *LocalModulePin) GetManifestDigest() string { + if x != nil { + return x.ManifestDigest + } + return "" +} + func (x *LocalModulePin) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime @@ -208,39 +217,42 @@ var file_buf_alpha_registry_v1alpha1_module_proto_rawDesc = []byte{ 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, + 0x65, 0x6e, 0x63, 0x65, 0x22, 0x89, 0x02, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x3b, 0x0a, - 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 
0x72, - 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x64, 0x72, 0x61, 0x66, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, - 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, 0x98, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, - 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, - 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, - 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, - 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x27, 0x0a, + 0x0f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, + 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x66, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x72, 0x61, 0x66, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, + 0x42, 0x98, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, + 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, + 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, + 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/plugin_curation.pb.go b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/plugin_curation.pb.go index e4a0537dcb..d7af560313 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/plugin_curation.pb.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/plugin_curation.pb.go @@ -44,6 +44,7 @@ const ( PluginRegistryType_PLUGIN_REGISTRY_TYPE_UNSPECIFIED PluginRegistryType = 0 PluginRegistryType_PLUGIN_REGISTRY_TYPE_GO PluginRegistryType = 1 PluginRegistryType_PLUGIN_REGISTRY_TYPE_NPM PluginRegistryType = 2 + PluginRegistryType_PLUGIN_REGISTRY_TYPE_MAVEN PluginRegistryType = 3 ) // Enum value maps for PluginRegistryType. @@ -52,11 +53,13 @@ var ( 0: "PLUGIN_REGISTRY_TYPE_UNSPECIFIED", 1: "PLUGIN_REGISTRY_TYPE_GO", 2: "PLUGIN_REGISTRY_TYPE_NPM", + 3: "PLUGIN_REGISTRY_TYPE_MAVEN", } PluginRegistryType_value = map[string]int32{ "PLUGIN_REGISTRY_TYPE_UNSPECIFIED": 0, "PLUGIN_REGISTRY_TYPE_GO": 1, "PLUGIN_REGISTRY_TYPE_NPM": 2, + "PLUGIN_REGISTRY_TYPE_MAVEN": 3, } ) @@ -358,6 +361,55 @@ func (x *NPMConfig) GetImportStyle() NPMImportStyle { return NPMImportStyle_NPM_IMPORT_STYLE_UNSPECIFIED } +// MavenConfig is the configuration for a Maven plugin. +type MavenConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optionally define the runtime libraries for the plugin. 
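// (Editor's note: per the comments elsewhere in this change, each runtime
// library names a dependency that code generated by this plugin requires at
// runtime; for Maven these are expressed as GAV coordinates, see
// MavenConfig_RuntimeLibrary.Gav further down in this file.)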
+ RuntimeLibraries []*MavenConfig_RuntimeLibrary `protobuf:"bytes,1,rep,name=runtime_libraries,json=runtimeLibraries,proto3" json:"runtime_libraries,omitempty"` +} + +func (x *MavenConfig) Reset() { + *x = MavenConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MavenConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MavenConfig) ProtoMessage() {} + +func (x *MavenConfig) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MavenConfig.ProtoReflect.Descriptor instead. +func (*MavenConfig) Descriptor() ([]byte, []int) { + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{2} +} + +func (x *MavenConfig) GetRuntimeLibraries() []*MavenConfig_RuntimeLibrary { + if x != nil { + return x.RuntimeLibraries + } + return nil +} + // RegistryConfig is the configuration for the remote registry of a plugin. type RegistryConfig struct { state protoimpl.MessageState @@ -368,6 +420,7 @@ type RegistryConfig struct { // // *RegistryConfig_GoConfig // *RegistryConfig_NpmConfig + // *RegistryConfig_MavenConfig RegistryConfig isRegistryConfig_RegistryConfig `protobuf_oneof:"registry_config"` // The options to pass to the plugin. These will // be merged into a single, comma-separated string. @@ -377,7 +430,7 @@ type RegistryConfig struct { func (x *RegistryConfig) Reset() { *x = RegistryConfig{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[2] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -390,7 +443,7 @@ func (x *RegistryConfig) String() string { func (*RegistryConfig) ProtoMessage() {} func (x *RegistryConfig) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[2] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -403,7 +456,7 @@ func (x *RegistryConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use RegistryConfig.ProtoReflect.Descriptor instead. 
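// (Editor's note, an illustrative sketch rather than generated code: the new
// oneof case is selected by wrapping a MavenConfig in the
// RegistryConfig_MavenConfig wrapper defined in this change; the GAV string
// below is a made-up placeholder.
//
//	cfg := &RegistryConfig{
//		RegistryConfig: &RegistryConfig_MavenConfig{
//			MavenConfig: &MavenConfig{
//				RuntimeLibraries: []*MavenConfig_RuntimeLibrary{
//					{Gav: "com.example:proto-runtime:1.0.0"},
//				},
//			},
//		},
//	}
//	_ = cfg.GetMavenConfig() // non-nil only when the maven_config case is set
//
// Note also that from RegistryConfig onward every msgTypes index in this file
// shifts up by one; the remainder of this hunk is the mechanical renumbering
// protoc-gen-go produces because MavenConfig now occupies message slot 2.)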
func (*RegistryConfig) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{2} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{3} } func (m *RegistryConfig) GetRegistryConfig() isRegistryConfig_RegistryConfig { @@ -427,6 +480,13 @@ func (x *RegistryConfig) GetNpmConfig() *NPMConfig { return nil } +func (x *RegistryConfig) GetMavenConfig() *MavenConfig { + if x, ok := x.GetRegistryConfig().(*RegistryConfig_MavenConfig); ok { + return x.MavenConfig + } + return nil +} + func (x *RegistryConfig) GetOptions() []string { if x != nil { return x.Options @@ -446,10 +506,16 @@ type RegistryConfig_NpmConfig struct { NpmConfig *NPMConfig `protobuf:"bytes,2,opt,name=npm_config,json=npmConfig,proto3,oneof"` } +type RegistryConfig_MavenConfig struct { + MavenConfig *MavenConfig `protobuf:"bytes,3,opt,name=maven_config,json=mavenConfig,proto3,oneof"` +} + func (*RegistryConfig_GoConfig) isRegistryConfig_RegistryConfig() {} func (*RegistryConfig_NpmConfig) isRegistryConfig_RegistryConfig() {} +func (*RegistryConfig_MavenConfig) isRegistryConfig_RegistryConfig() {} + type CuratedPluginReference struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -468,7 +534,7 @@ type CuratedPluginReference struct { func (x *CuratedPluginReference) Reset() { *x = CuratedPluginReference{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[3] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -481,7 +547,7 @@ func (x *CuratedPluginReference) String() string { func (*CuratedPluginReference) ProtoMessage() {} func (x *CuratedPluginReference) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[3] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -494,7 +560,7 @@ func (x *CuratedPluginReference) ProtoReflect() protoreflect.Message { // Deprecated: Use CuratedPluginReference.ProtoReflect.Descriptor instead. func (*CuratedPluginReference) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{3} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{4} } func (x *CuratedPluginReference) GetOwner() string { @@ -572,7 +638,7 @@ type CuratedPlugin struct { func (x *CuratedPlugin) Reset() { *x = CuratedPlugin{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[4] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -585,7 +651,7 @@ func (x *CuratedPlugin) String() string { func (*CuratedPlugin) ProtoMessage() {} func (x *CuratedPlugin) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[4] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -598,7 +664,7 @@ func (x *CuratedPlugin) ProtoReflect() protoreflect.Message { // Deprecated: Use CuratedPlugin.ProtoReflect.Descriptor instead. 
func (*CuratedPlugin) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{4} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{5} } func (x *CuratedPlugin) GetId() string { @@ -736,7 +802,7 @@ type GenerateCodeRequest struct { func (x *GenerateCodeRequest) Reset() { *x = GenerateCodeRequest{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[5] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -749,7 +815,7 @@ func (x *GenerateCodeRequest) String() string { func (*GenerateCodeRequest) ProtoMessage() {} func (x *GenerateCodeRequest) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[5] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -762,7 +828,7 @@ func (x *GenerateCodeRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateCodeRequest.ProtoReflect.Descriptor instead. func (*GenerateCodeRequest) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{5} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{6} } func (x *GenerateCodeRequest) GetImage() *v1.Image { @@ -806,7 +872,7 @@ type GenerateCodeResponse struct { func (x *GenerateCodeResponse) Reset() { *x = GenerateCodeResponse{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[6] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -819,7 +885,7 @@ func (x *GenerateCodeResponse) String() string { func (*GenerateCodeResponse) ProtoMessage() {} func (x *GenerateCodeResponse) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[6] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -832,7 +898,7 @@ func (x *GenerateCodeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateCodeResponse.ProtoReflect.Descriptor instead. 
func (*GenerateCodeResponse) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{6} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{7} } func (x *GenerateCodeResponse) GetResponses() []*PluginGenerationResponse { @@ -858,7 +924,7 @@ type PluginGenerationRequest struct { func (x *PluginGenerationRequest) Reset() { *x = PluginGenerationRequest{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[7] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -871,7 +937,7 @@ func (x *PluginGenerationRequest) String() string { func (*PluginGenerationRequest) ProtoMessage() {} func (x *PluginGenerationRequest) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[7] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -884,7 +950,7 @@ func (x *PluginGenerationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PluginGenerationRequest.ProtoReflect.Descriptor instead. func (*PluginGenerationRequest) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{7} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{8} } func (x *PluginGenerationRequest) GetPluginReference() *CuratedPluginReference { @@ -914,7 +980,7 @@ type PluginGenerationResponse struct { func (x *PluginGenerationResponse) Reset() { *x = PluginGenerationResponse{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[8] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -927,7 +993,7 @@ func (x *PluginGenerationResponse) String() string { func (*PluginGenerationResponse) ProtoMessage() {} func (x *PluginGenerationResponse) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[8] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -940,7 +1006,7 @@ func (x *PluginGenerationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PluginGenerationResponse.ProtoReflect.Descriptor instead. 
func (*PluginGenerationResponse) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{8} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{9} } func (x *PluginGenerationResponse) GetResponse() *pluginpb.CodeGeneratorResponse { @@ -967,7 +1033,7 @@ type DeleteCuratedPluginRequest struct { func (x *DeleteCuratedPluginRequest) Reset() { *x = DeleteCuratedPluginRequest{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[9] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -980,7 +1046,7 @@ func (x *DeleteCuratedPluginRequest) String() string { func (*DeleteCuratedPluginRequest) ProtoMessage() {} func (x *DeleteCuratedPluginRequest) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[9] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -993,7 +1059,7 @@ func (x *DeleteCuratedPluginRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCuratedPluginRequest.ProtoReflect.Descriptor instead. func (*DeleteCuratedPluginRequest) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{9} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{10} } func (x *DeleteCuratedPluginRequest) GetOwner() string { @@ -1026,7 +1092,7 @@ type DeleteCuratedPluginResponse struct { func (x *DeleteCuratedPluginResponse) Reset() { *x = DeleteCuratedPluginResponse{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[10] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1039,7 +1105,7 @@ func (x *DeleteCuratedPluginResponse) String() string { func (*DeleteCuratedPluginResponse) ProtoMessage() {} func (x *DeleteCuratedPluginResponse) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[10] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1052,7 +1118,7 @@ func (x *DeleteCuratedPluginResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteCuratedPluginResponse.ProtoReflect.Descriptor instead. 
func (*DeleteCuratedPluginResponse) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{10} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{11} } type CreateCuratedPluginRequest struct { @@ -1094,7 +1160,7 @@ type CreateCuratedPluginRequest struct { func (x *CreateCuratedPluginRequest) Reset() { *x = CreateCuratedPluginRequest{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[11] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1107,7 +1173,7 @@ func (x *CreateCuratedPluginRequest) String() string { func (*CreateCuratedPluginRequest) ProtoMessage() {} func (x *CreateCuratedPluginRequest) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[11] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1120,7 +1186,7 @@ func (x *CreateCuratedPluginRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateCuratedPluginRequest.ProtoReflect.Descriptor instead. func (*CreateCuratedPluginRequest) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{11} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{12} } func (x *CreateCuratedPluginRequest) GetOwner() string { @@ -1226,7 +1292,7 @@ type CreateCuratedPluginResponse struct { func (x *CreateCuratedPluginResponse) Reset() { *x = CreateCuratedPluginResponse{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[12] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1239,7 +1305,7 @@ func (x *CreateCuratedPluginResponse) String() string { func (*CreateCuratedPluginResponse) ProtoMessage() {} func (x *CreateCuratedPluginResponse) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[12] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1252,7 +1318,7 @@ func (x *CreateCuratedPluginResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateCuratedPluginResponse.ProtoReflect.Descriptor instead. 
func (*CreateCuratedPluginResponse) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{12} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{13} } func (x *CreateCuratedPluginResponse) GetConfiguration() *CuratedPlugin { @@ -1276,7 +1342,7 @@ type ListCuratedPluginsRequest struct { func (x *ListCuratedPluginsRequest) Reset() { *x = ListCuratedPluginsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[13] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1289,7 +1355,7 @@ func (x *ListCuratedPluginsRequest) String() string { func (*ListCuratedPluginsRequest) ProtoMessage() {} func (x *ListCuratedPluginsRequest) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[13] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1302,7 +1368,7 @@ func (x *ListCuratedPluginsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListCuratedPluginsRequest.ProtoReflect.Descriptor instead. func (*ListCuratedPluginsRequest) Descriptor() ([]byte, []int) { - return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{13} + return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{14} } func (x *ListCuratedPluginsRequest) GetPageSize() uint32 { @@ -1339,7 +1405,7 @@ type ListCuratedPluginsResponse struct { func (x *ListCuratedPluginsResponse) Reset() { *x = ListCuratedPluginsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[14] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1352,7 +1418,7 @@ func (x *ListCuratedPluginsResponse) String() string { func (*ListCuratedPluginsResponse) ProtoMessage() {} func (x *ListCuratedPluginsResponse) ProtoReflect() protoreflect.Message { - mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[14] + mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1365,7 +1431,7 @@ func (x *ListCuratedPluginsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListCuratedPluginsResponse.ProtoReflect.Descriptor instead. 
func (*ListCuratedPluginsResponse) Descriptor() ([]byte, []int) {
-	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{14}
+	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{15}
 }
 
 func (x *ListCuratedPluginsResponse) GetPlugins() []*CuratedPlugin {
@@ -1404,7 +1470,7 @@ type GetLatestCuratedPluginRequest struct {
 func (x *GetLatestCuratedPluginRequest) Reset() {
 	*x = GetLatestCuratedPluginRequest{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[15]
+		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[16]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1417,7 +1483,7 @@ func (x *GetLatestCuratedPluginRequest) String() string {
 func (*GetLatestCuratedPluginRequest) ProtoMessage() {}
 
 func (x *GetLatestCuratedPluginRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[15]
+	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[16]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1430,7 +1496,7 @@ func (x *GetLatestCuratedPluginRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use GetLatestCuratedPluginRequest.ProtoReflect.Descriptor instead.
 func (*GetLatestCuratedPluginRequest) Descriptor() ([]byte, []int) {
-	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{15}
+	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{16}
 }
 
 func (x *GetLatestCuratedPluginRequest) GetOwner() string {
@@ -1474,7 +1540,7 @@ type GetLatestCuratedPluginResponse struct {
 func (x *GetLatestCuratedPluginResponse) Reset() {
 	*x = GetLatestCuratedPluginResponse{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[16]
+		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[17]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1487,7 +1553,7 @@ func (x *GetLatestCuratedPluginResponse) String() string {
 func (*GetLatestCuratedPluginResponse) ProtoMessage() {}
 
 func (x *GetLatestCuratedPluginResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[16]
+	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[17]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1500,7 +1566,7 @@ func (x *GetLatestCuratedPluginResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use GetLatestCuratedPluginResponse.ProtoReflect.Descriptor instead.
 func (*GetLatestCuratedPluginResponse) Descriptor() ([]byte, []int) {
-	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{16}
+	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{17}
 }
 
 func (x *GetLatestCuratedPluginResponse) GetPlugin() *CuratedPlugin {
@@ -1530,7 +1596,7 @@ type CuratedPluginVersionRevisions struct {
 func (x *CuratedPluginVersionRevisions) Reset() {
 	*x = CuratedPluginVersionRevisions{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[17]
+		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[18]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1543,7 +1609,7 @@ func (x *CuratedPluginVersionRevisions) String() string {
 func (*CuratedPluginVersionRevisions) ProtoMessage() {}
 
 func (x *CuratedPluginVersionRevisions) ProtoReflect() protoreflect.Message {
-	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[17]
+	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[18]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1556,7 +1622,7 @@ func (x *CuratedPluginVersionRevisions) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use CuratedPluginVersionRevisions.ProtoReflect.Descriptor instead.
 func (*CuratedPluginVersionRevisions) Descriptor() ([]byte, []int) {
-	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{17}
+	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{18}
 }
 
 func (x *CuratedPluginVersionRevisions) GetVersion() string {
@@ -1588,7 +1654,7 @@ type GoConfig_RuntimeLibrary struct {
 func (x *GoConfig_RuntimeLibrary) Reset() {
 	*x = GoConfig_RuntimeLibrary{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[18]
+		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[19]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1601,7 +1667,7 @@ func (x *GoConfig_RuntimeLibrary) String() string {
 func (*GoConfig_RuntimeLibrary) ProtoMessage() {}
 
 func (x *GoConfig_RuntimeLibrary) ProtoReflect() protoreflect.Message {
-	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[18]
+	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[19]
	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1646,7 +1712,7 @@ type NPMConfig_RuntimeLibrary struct {
 func (x *NPMConfig_RuntimeLibrary) Reset() {
 	*x = NPMConfig_RuntimeLibrary{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[19]
+		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[20]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1659,7 +1725,7 @@ func (x *NPMConfig_RuntimeLibrary) String() string {
 func (*NPMConfig_RuntimeLibrary) ProtoMessage() {}
 
 func (x *NPMConfig_RuntimeLibrary) ProtoReflect() protoreflect.Message {
-	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[19]
+	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[20]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1689,6 +1755,56 @@ func (x *NPMConfig_RuntimeLibrary) GetVersion() string {
 	return ""
 }
 
+// RuntimeLibrary describes the runtime library dependency of the generated code.
+type MavenConfig_RuntimeLibrary struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// gav is the GAV (groupId:artifactId:version) of the dependency.
+	// See https://maven.apache.org/repositories/artifacts.html for more information.
+	Gav string `protobuf:"bytes,1,opt,name=gav,proto3" json:"gav,omitempty"`
+}
+
+func (x *MavenConfig_RuntimeLibrary) Reset() {
+	*x = MavenConfig_RuntimeLibrary{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[21]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MavenConfig_RuntimeLibrary) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MavenConfig_RuntimeLibrary) ProtoMessage() {}
+
+func (x *MavenConfig_RuntimeLibrary) ProtoReflect() protoreflect.Message {
+	mi := &file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[21]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MavenConfig_RuntimeLibrary.ProtoReflect.Descriptor instead.
+func (*MavenConfig_RuntimeLibrary) Descriptor() ([]byte, []int) {
+	return file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *MavenConfig_RuntimeLibrary) GetGav() string {
+	if x != nil {
+		return x.Gav
+	}
+	return ""
+}
+
 var File_buf_alpha_registry_v1alpha1_plugin_curation_proto protoreflect.FileDescriptor
 
 var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDesc = []byte{
@@ -1737,320 +1853,337 @@ var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDesc = []byte{
 	0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
 	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
 	0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x0e, 0x52,
-	0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x44, 0x0a,
-	0x09, 0x67, 0x6f, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x25, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67,
-	0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47,
-	0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x08, 0x67, 0x6f, 0x43, 0x6f, 0x6e,
-	0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x0a, 0x6e, 0x70, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
-	0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c,
-	0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61,
-	0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4e, 0x50, 0x4d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48,
-	0x00, 0x52, 0x09, 0x6e, 0x70, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x11, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
-	0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08,
0x03, 0x10, 0x0a, 0x22, - 0x78, 0x0a, 0x16, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, - 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x05, 0x0a, 0x0d, 0x43, 0x75, - 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6f, - 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, - 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x62, - 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x63, 0x65, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, - 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x72, 0x6c, - 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 
0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x62, 0x75, - 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6c, - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2b, - 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, - 0x73, 0x70, 0x64, 0x78, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x70, 0x64, 0x78, 0x4c, 0x69, 0x63, 0x65, 0x6e, - 0x73, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, - 0x75, 0x72, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x69, 0x63, 0x65, 0x6e, - 0x73, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, - 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, - 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x05, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, - 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x50, 0x0a, 0x08, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x62, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x0b, 0x4d, + 0x61, 0x76, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x64, 0x0a, 0x11, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x76, 0x65, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x52, 0x10, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x69, 0x65, 0x73, + 0x1a, 0x22, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x61, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x67, 0x61, 0x76, 0x22, 0xa1, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x12, 0x44, 0x0a, 0x09, 0x67, 0x6f, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x75, 0x66, + 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x48, 0x00, 0x52, 0x08, 0x67, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, + 0x0a, 0x6e, 0x70, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x26, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4e, 0x50, 0x4d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x70, 0x6d, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x0a, 0x0c, 0x6d, 0x61, 0x76, 0x65, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x5f, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x6b, - 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x66, 0x2e, + 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x76, 0x65, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x61, 0x76, 0x65, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, + 0x11, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x0a, 0x22, 0x78, 0x0a, 0x16, 0x43, 0x75, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 
0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xf5, 0x05, 0x0a, 0x0d, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, + 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, + 0x0a, 0x16, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, + 0x65, 0x12, 0x57, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, + 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0c, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x0f, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 
0x6e, 0x12, 0x56, 0x0a, + 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4c, 0x61, 0x6e, 0x67, + 0x75, 0x61, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x70, 0x64, 0x78, 0x5f, 0x6c, 0x69, + 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x73, 0x70, 0x64, 0x78, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x1a, + 0x0a, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x13, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x12, 0x50, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, + 0x5f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, + 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x6b, 0x0a, 0x14, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x53, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x17, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 
0x12, 0x5e, 0x0a, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x62, 0x75, 0x66, + 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x67, 0x0a, 0x18, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x60, 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x75, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1d, 0x0a, 0x1b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, 0x05, 0x0a, 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, + 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, + 0x0d, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 
0x69, 0x6e, 0x65, 0x72, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, + 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x22, 0x93, 0x01, 0x0a, 0x17, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5e, 0x0a, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x33, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x22, 0x67, 0x0a, 0x18, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x60, 0x0a, 0x1a, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x1d, 0x0a, 0x1b, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x99, 0x05, 0x0a, 0x1a, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, - 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x0d, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x62, 0x75, - 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x57, 0x0a, 0x0c, 0x64, 0x65, - 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, - 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, - 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, - 0x69, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x72, - 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, - 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, - 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x72, 0x65, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, - 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x73, 0x70, 0x64, 0x78, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x70, 0x64, 0x78, 0x4c, 0x69, 0x63, - 0x65, 0x6e, 0x73, 0x65, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, - 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x69, 0x63, - 0x65, 0x6e, 0x73, 0x65, 0x55, 0x72, 0x6c, 0x22, 0x6f, 0x0a, 0x1b, 0x43, 
0x72, 0x65, 0x61, 0x74, - 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, - 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x71, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, - 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x1a, - 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0c, + 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, + 0x0f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x56, 0x0a, 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, + 0x67, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4c, 0x61, + 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x52, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4c, 0x61, + 
0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x70, 0x64, 0x78, 0x5f, + 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x73, 0x70, 0x64, 0x78, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x49, 0x64, 0x12, + 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x55, 0x72, 0x6c, + 0x22, 0x6f, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x50, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x71, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, + 0x76, 0x65, 0x72, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x72, 0x65, 0x76, + 0x65, 0x72, 0x73, 0x65, 0x22, 0x8a, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, + 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x7f, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xbc, 
0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x56, 0x0a, 0x08, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x7f, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4c, - 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, - 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbc, 0x01, 0x0a, 0x1e, 0x47, 0x65, - 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x06, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x62, - 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, - 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x12, 0x56, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x08, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x57, 0x0a, 0x1d, 0x43, 0x75, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x2a, 0x75, 0x0a, 0x12, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x4c, 0x55, 0x47, 0x49, + 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x57, 0x0a, 0x1d, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, + 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 0x52, + 0x09, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2a, 0x95, 0x01, 0x0a, 0x12, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x49, + 0x53, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, - 0x17, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x52, 0x59, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4c, - 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x52, 0x45, 0x47, 0x49, 0x53, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x4e, 0x50, 0x4d, 0x10, 0x02, 0x2a, 0xce, 0x03, 0x0a, 0x0e, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x50, - 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, + 0x47, 0x4f, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x52, + 0x45, 0x47, 0x49, 0x53, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x50, 0x4d, + 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x52, 0x45, 0x47, + 0x49, 0x53, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x41, 0x56, 0x45, 0x4e, + 0x10, 0x03, 0x2a, 0xce, 0x03, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x4c, 0x61, 0x6e, + 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, + 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, + 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x47, 0x4f, 0x10, 0x01, 0x12, 0x1e, + 0x0a, 0x1a, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, + 0x45, 0x5f, 0x4a, 0x41, 0x56, 0x41, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x10, 
0x02, 0x12, 0x1e, + 0x0a, 0x1a, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, + 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, 0x50, 0x54, 0x10, 0x03, 0x12, 0x19, + 0x0a, 0x15, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, + 0x45, 0x5f, 0x53, 0x57, 0x49, 0x46, 0x54, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x4c, 0x55, + 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x43, 0x50, 0x50, + 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, + 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x4a, 0x41, 0x56, 0x41, 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, - 0x47, 0x4f, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, - 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x4a, 0x41, 0x56, 0x41, 0x53, 0x43, 0x52, 0x49, - 0x50, 0x54, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, - 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x53, 0x43, 0x52, 0x49, - 0x50, 0x54, 0x10, 0x03, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, - 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x53, 0x57, 0x49, 0x46, 0x54, 0x10, 0x04, 0x12, - 0x17, 0x0a, 0x13, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, - 0x47, 0x45, 0x5f, 0x43, 0x50, 0x50, 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, - 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x4a, 0x41, 0x56, 0x41, - 0x10, 0x06, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, - 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x44, 0x41, 0x52, 0x54, 0x10, 0x07, 0x12, 0x18, 0x0a, 0x14, + 0x44, 0x41, 0x52, 0x54, 0x10, 0x07, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, + 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x52, 0x55, 0x53, 0x54, 0x10, 0x08, + 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, + 0x41, 0x47, 0x45, 0x5f, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, - 0x52, 0x55, 0x53, 0x54, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, - 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, - 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, - 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x52, 0x55, 0x42, 0x59, 0x10, 0x0a, 0x12, 0x1a, 0x0a, 0x16, + 0x52, 0x55, 0x42, 0x59, 0x10, 0x0a, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, + 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x4b, 0x4f, 0x54, 0x4c, 0x49, 0x4e, + 0x10, 0x0b, 0x12, 0x1f, 0x0a, 0x1b, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, + 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x49, 0x56, 0x45, 0x5f, + 0x43, 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, + 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x50, 0x48, 0x50, 0x10, 0x0d, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, - 0x4b, 0x4f, 0x54, 0x4c, 0x49, 0x4e, 0x10, 0x0b, 0x12, 0x1f, 0x0a, 0x1b, 0x50, 0x4c, 0x55, 0x47, - 0x49, 0x4e, 
0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, - 0x43, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x43, 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x4c, 0x55, - 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x50, 0x48, 0x50, - 0x10, 0x0d, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, - 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x43, 0x53, 0x48, 0x41, 0x52, 0x50, 0x10, 0x0e, 0x12, 0x19, - 0x0a, 0x15, 0x50, 0x4c, 0x55, 0x47, 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, - 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4c, 0x41, 0x10, 0x0f, 0x2a, 0x6e, 0x0a, 0x0e, 0x4e, 0x50, 0x4d, - 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x4e, - 0x50, 0x4d, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, - 0x17, 0x4e, 0x50, 0x4d, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x59, 0x4c, - 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x4e, 0x50, - 0x4d, 0x5f, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x43, - 0x4f, 0x4d, 0x4d, 0x4f, 0x4e, 0x4a, 0x53, 0x10, 0x02, 0x32, 0xc9, 0x04, 0x0a, 0x15, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x36, 0x2e, 0x62, 0x75, 0x66, - 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x13, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x12, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x62, + 0x43, 0x53, 0x48, 0x41, 0x52, 0x50, 0x10, 0x0e, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x4c, 0x55, 0x47, + 0x49, 0x4e, 0x5f, 0x4c, 0x41, 0x4e, 0x47, 0x55, 0x41, 0x47, 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4c, + 0x41, 0x10, 0x0f, 0x2a, 0x6e, 0x0a, 0x0e, 0x4e, 0x50, 0x4d, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x50, 0x4d, 0x5f, 0x49, 0x4d, 0x50, + 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x4e, 0x50, 0x4d, 0x5f, 0x49, + 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x55, + 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1d, 
0x0a, 0x19, 0x4e, 0x50, 0x4d, 0x5f, 0x49, 0x4d, 0x50, 0x4f, + 0x52, 0x54, 0x5f, 0x53, 0x54, 0x59, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x4f, 0x4e, 0x4a, + 0x53, 0x10, 0x02, 0x32, 0xc9, 0x04, 0x0a, 0x15, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x85, 0x01, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x12, 0x36, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4c, 0x61, - 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x12, 0x3a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, - 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, + 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, + 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, - 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x13, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x12, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x62, 0x75, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x91, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, 0x75, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x3a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x8c, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x73, 0x0a, 0x0c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, - 0x30, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x31, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xa0, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, - 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, - 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x13, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x43, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, - 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, - 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, - 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, - 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, + 0x65, 0x73, 0x74, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, + 
0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x43, + 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x88, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x37, 0x2e, 0x62, + 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x75, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, + 0x8c, 0x01, 0x0a, 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x73, 0x0a, 0x0c, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x30, 0x2e, 0x62, 0x75, 0x66, 0x2e, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x62, 0x75, + 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xa0, + 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x42, 0x13, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, + 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, + 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, + 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, + 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, + 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 
0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2066,74 +2199,78 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDescGZIP() []byte } var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes = make([]protoimpl.MessageInfo, 22) var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_goTypes = []interface{}{ (PluginRegistryType)(0), // 0: buf.alpha.registry.v1alpha1.PluginRegistryType (PluginLanguage)(0), // 1: buf.alpha.registry.v1alpha1.PluginLanguage (NPMImportStyle)(0), // 2: buf.alpha.registry.v1alpha1.NPMImportStyle (*GoConfig)(nil), // 3: buf.alpha.registry.v1alpha1.GoConfig (*NPMConfig)(nil), // 4: buf.alpha.registry.v1alpha1.NPMConfig - (*RegistryConfig)(nil), // 5: buf.alpha.registry.v1alpha1.RegistryConfig - (*CuratedPluginReference)(nil), // 6: buf.alpha.registry.v1alpha1.CuratedPluginReference - (*CuratedPlugin)(nil), // 7: buf.alpha.registry.v1alpha1.CuratedPlugin - (*GenerateCodeRequest)(nil), // 8: buf.alpha.registry.v1alpha1.GenerateCodeRequest - (*GenerateCodeResponse)(nil), // 9: buf.alpha.registry.v1alpha1.GenerateCodeResponse - (*PluginGenerationRequest)(nil), // 10: buf.alpha.registry.v1alpha1.PluginGenerationRequest - (*PluginGenerationResponse)(nil), // 11: buf.alpha.registry.v1alpha1.PluginGenerationResponse - (*DeleteCuratedPluginRequest)(nil), // 12: buf.alpha.registry.v1alpha1.DeleteCuratedPluginRequest - (*DeleteCuratedPluginResponse)(nil), // 13: buf.alpha.registry.v1alpha1.DeleteCuratedPluginResponse - (*CreateCuratedPluginRequest)(nil), // 14: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest - (*CreateCuratedPluginResponse)(nil), // 15: buf.alpha.registry.v1alpha1.CreateCuratedPluginResponse - (*ListCuratedPluginsRequest)(nil), // 16: buf.alpha.registry.v1alpha1.ListCuratedPluginsRequest - (*ListCuratedPluginsResponse)(nil), // 17: buf.alpha.registry.v1alpha1.ListCuratedPluginsResponse - (*GetLatestCuratedPluginRequest)(nil), // 18: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginRequest - (*GetLatestCuratedPluginResponse)(nil), // 19: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse - (*CuratedPluginVersionRevisions)(nil), // 20: buf.alpha.registry.v1alpha1.CuratedPluginVersionRevisions - (*GoConfig_RuntimeLibrary)(nil), // 21: buf.alpha.registry.v1alpha1.GoConfig.RuntimeLibrary - (*NPMConfig_RuntimeLibrary)(nil), // 22: buf.alpha.registry.v1alpha1.NPMConfig.RuntimeLibrary - (*timestamppb.Timestamp)(nil), // 23: google.protobuf.Timestamp - (*v1.Image)(nil), // 24: buf.alpha.image.v1.Image - (*pluginpb.CodeGeneratorResponse)(nil), // 25: google.protobuf.compiler.CodeGeneratorResponse + (*MavenConfig)(nil), // 5: buf.alpha.registry.v1alpha1.MavenConfig + (*RegistryConfig)(nil), // 6: buf.alpha.registry.v1alpha1.RegistryConfig + (*CuratedPluginReference)(nil), // 7: buf.alpha.registry.v1alpha1.CuratedPluginReference + (*CuratedPlugin)(nil), // 8: buf.alpha.registry.v1alpha1.CuratedPlugin + (*GenerateCodeRequest)(nil), // 9: buf.alpha.registry.v1alpha1.GenerateCodeRequest + (*GenerateCodeResponse)(nil), // 10: 
buf.alpha.registry.v1alpha1.GenerateCodeResponse + (*PluginGenerationRequest)(nil), // 11: buf.alpha.registry.v1alpha1.PluginGenerationRequest + (*PluginGenerationResponse)(nil), // 12: buf.alpha.registry.v1alpha1.PluginGenerationResponse + (*DeleteCuratedPluginRequest)(nil), // 13: buf.alpha.registry.v1alpha1.DeleteCuratedPluginRequest + (*DeleteCuratedPluginResponse)(nil), // 14: buf.alpha.registry.v1alpha1.DeleteCuratedPluginResponse + (*CreateCuratedPluginRequest)(nil), // 15: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest + (*CreateCuratedPluginResponse)(nil), // 16: buf.alpha.registry.v1alpha1.CreateCuratedPluginResponse + (*ListCuratedPluginsRequest)(nil), // 17: buf.alpha.registry.v1alpha1.ListCuratedPluginsRequest + (*ListCuratedPluginsResponse)(nil), // 18: buf.alpha.registry.v1alpha1.ListCuratedPluginsResponse + (*GetLatestCuratedPluginRequest)(nil), // 19: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginRequest + (*GetLatestCuratedPluginResponse)(nil), // 20: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse + (*CuratedPluginVersionRevisions)(nil), // 21: buf.alpha.registry.v1alpha1.CuratedPluginVersionRevisions + (*GoConfig_RuntimeLibrary)(nil), // 22: buf.alpha.registry.v1alpha1.GoConfig.RuntimeLibrary + (*NPMConfig_RuntimeLibrary)(nil), // 23: buf.alpha.registry.v1alpha1.NPMConfig.RuntimeLibrary + (*MavenConfig_RuntimeLibrary)(nil), // 24: buf.alpha.registry.v1alpha1.MavenConfig.RuntimeLibrary + (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp + (*v1.Image)(nil), // 26: buf.alpha.image.v1.Image + (*pluginpb.CodeGeneratorResponse)(nil), // 27: google.protobuf.compiler.CodeGeneratorResponse } var file_buf_alpha_registry_v1alpha1_plugin_curation_proto_depIdxs = []int32{ - 21, // 0: buf.alpha.registry.v1alpha1.GoConfig.runtime_libraries:type_name -> buf.alpha.registry.v1alpha1.GoConfig.RuntimeLibrary - 22, // 1: buf.alpha.registry.v1alpha1.NPMConfig.runtime_libraries:type_name -> buf.alpha.registry.v1alpha1.NPMConfig.RuntimeLibrary + 22, // 0: buf.alpha.registry.v1alpha1.GoConfig.runtime_libraries:type_name -> buf.alpha.registry.v1alpha1.GoConfig.RuntimeLibrary + 23, // 1: buf.alpha.registry.v1alpha1.NPMConfig.runtime_libraries:type_name -> buf.alpha.registry.v1alpha1.NPMConfig.RuntimeLibrary 2, // 2: buf.alpha.registry.v1alpha1.NPMConfig.import_style:type_name -> buf.alpha.registry.v1alpha1.NPMImportStyle - 3, // 3: buf.alpha.registry.v1alpha1.RegistryConfig.go_config:type_name -> buf.alpha.registry.v1alpha1.GoConfig - 4, // 4: buf.alpha.registry.v1alpha1.RegistryConfig.npm_config:type_name -> buf.alpha.registry.v1alpha1.NPMConfig - 0, // 5: buf.alpha.registry.v1alpha1.CuratedPlugin.registry_type:type_name -> buf.alpha.registry.v1alpha1.PluginRegistryType - 23, // 6: buf.alpha.registry.v1alpha1.CuratedPlugin.create_time:type_name -> google.protobuf.Timestamp - 6, // 7: buf.alpha.registry.v1alpha1.CuratedPlugin.dependencies:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginReference - 5, // 8: buf.alpha.registry.v1alpha1.CuratedPlugin.registry_config:type_name -> buf.alpha.registry.v1alpha1.RegistryConfig - 1, // 9: buf.alpha.registry.v1alpha1.CuratedPlugin.output_languages:type_name -> buf.alpha.registry.v1alpha1.PluginLanguage - 24, // 10: buf.alpha.registry.v1alpha1.GenerateCodeRequest.image:type_name -> buf.alpha.image.v1.Image - 10, // 11: buf.alpha.registry.v1alpha1.GenerateCodeRequest.requests:type_name -> buf.alpha.registry.v1alpha1.PluginGenerationRequest - 11, // 12: 
buf.alpha.registry.v1alpha1.GenerateCodeResponse.responses:type_name -> buf.alpha.registry.v1alpha1.PluginGenerationResponse - 6, // 13: buf.alpha.registry.v1alpha1.PluginGenerationRequest.plugin_reference:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginReference - 25, // 14: buf.alpha.registry.v1alpha1.PluginGenerationResponse.response:type_name -> google.protobuf.compiler.CodeGeneratorResponse - 0, // 15: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.registry_type:type_name -> buf.alpha.registry.v1alpha1.PluginRegistryType - 6, // 16: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.dependencies:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginReference - 5, // 17: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.registry_config:type_name -> buf.alpha.registry.v1alpha1.RegistryConfig - 1, // 18: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.output_languages:type_name -> buf.alpha.registry.v1alpha1.PluginLanguage - 7, // 19: buf.alpha.registry.v1alpha1.CreateCuratedPluginResponse.configuration:type_name -> buf.alpha.registry.v1alpha1.CuratedPlugin - 7, // 20: buf.alpha.registry.v1alpha1.ListCuratedPluginsResponse.plugins:type_name -> buf.alpha.registry.v1alpha1.CuratedPlugin - 7, // 21: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse.plugin:type_name -> buf.alpha.registry.v1alpha1.CuratedPlugin - 20, // 22: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse.versions:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginVersionRevisions - 16, // 23: buf.alpha.registry.v1alpha1.PluginCurationService.ListCuratedPlugins:input_type -> buf.alpha.registry.v1alpha1.ListCuratedPluginsRequest - 14, // 24: buf.alpha.registry.v1alpha1.PluginCurationService.CreateCuratedPlugin:input_type -> buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest - 18, // 25: buf.alpha.registry.v1alpha1.PluginCurationService.GetLatestCuratedPlugin:input_type -> buf.alpha.registry.v1alpha1.GetLatestCuratedPluginRequest - 12, // 26: buf.alpha.registry.v1alpha1.PluginCurationService.DeleteCuratedPlugin:input_type -> buf.alpha.registry.v1alpha1.DeleteCuratedPluginRequest - 8, // 27: buf.alpha.registry.v1alpha1.CodeGenerationService.GenerateCode:input_type -> buf.alpha.registry.v1alpha1.GenerateCodeRequest - 17, // 28: buf.alpha.registry.v1alpha1.PluginCurationService.ListCuratedPlugins:output_type -> buf.alpha.registry.v1alpha1.ListCuratedPluginsResponse - 15, // 29: buf.alpha.registry.v1alpha1.PluginCurationService.CreateCuratedPlugin:output_type -> buf.alpha.registry.v1alpha1.CreateCuratedPluginResponse - 19, // 30: buf.alpha.registry.v1alpha1.PluginCurationService.GetLatestCuratedPlugin:output_type -> buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse - 13, // 31: buf.alpha.registry.v1alpha1.PluginCurationService.DeleteCuratedPlugin:output_type -> buf.alpha.registry.v1alpha1.DeleteCuratedPluginResponse - 9, // 32: buf.alpha.registry.v1alpha1.CodeGenerationService.GenerateCode:output_type -> buf.alpha.registry.v1alpha1.GenerateCodeResponse - 28, // [28:33] is the sub-list for method output_type - 23, // [23:28] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // [0:23] is the sub-list for field type_name + 24, // 3: buf.alpha.registry.v1alpha1.MavenConfig.runtime_libraries:type_name -> buf.alpha.registry.v1alpha1.MavenConfig.RuntimeLibrary + 3, // 4: buf.alpha.registry.v1alpha1.RegistryConfig.go_config:type_name -> 
buf.alpha.registry.v1alpha1.GoConfig + 4, // 5: buf.alpha.registry.v1alpha1.RegistryConfig.npm_config:type_name -> buf.alpha.registry.v1alpha1.NPMConfig + 5, // 6: buf.alpha.registry.v1alpha1.RegistryConfig.maven_config:type_name -> buf.alpha.registry.v1alpha1.MavenConfig + 0, // 7: buf.alpha.registry.v1alpha1.CuratedPlugin.registry_type:type_name -> buf.alpha.registry.v1alpha1.PluginRegistryType + 25, // 8: buf.alpha.registry.v1alpha1.CuratedPlugin.create_time:type_name -> google.protobuf.Timestamp + 7, // 9: buf.alpha.registry.v1alpha1.CuratedPlugin.dependencies:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginReference + 6, // 10: buf.alpha.registry.v1alpha1.CuratedPlugin.registry_config:type_name -> buf.alpha.registry.v1alpha1.RegistryConfig + 1, // 11: buf.alpha.registry.v1alpha1.CuratedPlugin.output_languages:type_name -> buf.alpha.registry.v1alpha1.PluginLanguage + 26, // 12: buf.alpha.registry.v1alpha1.GenerateCodeRequest.image:type_name -> buf.alpha.image.v1.Image + 11, // 13: buf.alpha.registry.v1alpha1.GenerateCodeRequest.requests:type_name -> buf.alpha.registry.v1alpha1.PluginGenerationRequest + 12, // 14: buf.alpha.registry.v1alpha1.GenerateCodeResponse.responses:type_name -> buf.alpha.registry.v1alpha1.PluginGenerationResponse + 7, // 15: buf.alpha.registry.v1alpha1.PluginGenerationRequest.plugin_reference:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginReference + 27, // 16: buf.alpha.registry.v1alpha1.PluginGenerationResponse.response:type_name -> google.protobuf.compiler.CodeGeneratorResponse + 0, // 17: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.registry_type:type_name -> buf.alpha.registry.v1alpha1.PluginRegistryType + 7, // 18: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.dependencies:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginReference + 6, // 19: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.registry_config:type_name -> buf.alpha.registry.v1alpha1.RegistryConfig + 1, // 20: buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest.output_languages:type_name -> buf.alpha.registry.v1alpha1.PluginLanguage + 8, // 21: buf.alpha.registry.v1alpha1.CreateCuratedPluginResponse.configuration:type_name -> buf.alpha.registry.v1alpha1.CuratedPlugin + 8, // 22: buf.alpha.registry.v1alpha1.ListCuratedPluginsResponse.plugins:type_name -> buf.alpha.registry.v1alpha1.CuratedPlugin + 8, // 23: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse.plugin:type_name -> buf.alpha.registry.v1alpha1.CuratedPlugin + 21, // 24: buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse.versions:type_name -> buf.alpha.registry.v1alpha1.CuratedPluginVersionRevisions + 17, // 25: buf.alpha.registry.v1alpha1.PluginCurationService.ListCuratedPlugins:input_type -> buf.alpha.registry.v1alpha1.ListCuratedPluginsRequest + 15, // 26: buf.alpha.registry.v1alpha1.PluginCurationService.CreateCuratedPlugin:input_type -> buf.alpha.registry.v1alpha1.CreateCuratedPluginRequest + 19, // 27: buf.alpha.registry.v1alpha1.PluginCurationService.GetLatestCuratedPlugin:input_type -> buf.alpha.registry.v1alpha1.GetLatestCuratedPluginRequest + 13, // 28: buf.alpha.registry.v1alpha1.PluginCurationService.DeleteCuratedPlugin:input_type -> buf.alpha.registry.v1alpha1.DeleteCuratedPluginRequest + 9, // 29: buf.alpha.registry.v1alpha1.CodeGenerationService.GenerateCode:input_type -> buf.alpha.registry.v1alpha1.GenerateCodeRequest + 18, // 30: buf.alpha.registry.v1alpha1.PluginCurationService.ListCuratedPlugins:output_type -> 
buf.alpha.registry.v1alpha1.ListCuratedPluginsResponse + 16, // 31: buf.alpha.registry.v1alpha1.PluginCurationService.CreateCuratedPlugin:output_type -> buf.alpha.registry.v1alpha1.CreateCuratedPluginResponse + 20, // 32: buf.alpha.registry.v1alpha1.PluginCurationService.GetLatestCuratedPlugin:output_type -> buf.alpha.registry.v1alpha1.GetLatestCuratedPluginResponse + 14, // 33: buf.alpha.registry.v1alpha1.PluginCurationService.DeleteCuratedPlugin:output_type -> buf.alpha.registry.v1alpha1.DeleteCuratedPluginResponse + 10, // 34: buf.alpha.registry.v1alpha1.CodeGenerationService.GenerateCode:output_type -> buf.alpha.registry.v1alpha1.GenerateCodeResponse + 30, // [30:35] is the sub-list for method output_type + 25, // [25:30] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name } func init() { file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() } @@ -2167,7 +2304,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RegistryConfig); i { + switch v := v.(*MavenConfig); i { case 0: return &v.state case 1: @@ -2179,7 +2316,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CuratedPluginReference); i { + switch v := v.(*RegistryConfig); i { case 0: return &v.state case 1: @@ -2191,7 +2328,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CuratedPlugin); i { + switch v := v.(*CuratedPluginReference); i { case 0: return &v.state case 1: @@ -2203,7 +2340,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateCodeRequest); i { + switch v := v.(*CuratedPlugin); i { case 0: return &v.state case 1: @@ -2215,7 +2352,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateCodeResponse); i { + switch v := v.(*GenerateCodeRequest); i { case 0: return &v.state case 1: @@ -2227,7 +2364,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PluginGenerationRequest); i { + switch v := v.(*GenerateCodeResponse); i { case 0: return &v.state case 1: @@ -2239,7 +2376,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PluginGenerationResponse); i { + switch v := v.(*PluginGenerationRequest); i { case 0: return &v.state case 1: @@ -2251,7 +2388,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[9].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*DeleteCuratedPluginRequest); i { + switch v := v.(*PluginGenerationResponse); i { case 0: return &v.state case 1: @@ -2263,7 +2400,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteCuratedPluginResponse); i { + switch v := v.(*DeleteCuratedPluginRequest); i { case 0: return &v.state case 1: @@ -2275,7 +2412,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateCuratedPluginRequest); i { + switch v := v.(*DeleteCuratedPluginResponse); i { case 0: return &v.state case 1: @@ -2287,7 +2424,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateCuratedPluginResponse); i { + switch v := v.(*CreateCuratedPluginRequest); i { case 0: return &v.state case 1: @@ -2299,7 +2436,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListCuratedPluginsRequest); i { + switch v := v.(*CreateCuratedPluginResponse); i { case 0: return &v.state case 1: @@ -2311,7 +2448,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListCuratedPluginsResponse); i { + switch v := v.(*ListCuratedPluginsRequest); i { case 0: return &v.state case 1: @@ -2323,7 +2460,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLatestCuratedPluginRequest); i { + switch v := v.(*ListCuratedPluginsResponse); i { case 0: return &v.state case 1: @@ -2335,7 +2472,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLatestCuratedPluginResponse); i { + switch v := v.(*GetLatestCuratedPluginRequest); i { case 0: return &v.state case 1: @@ -2347,7 +2484,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CuratedPluginVersionRevisions); i { + switch v := v.(*GetLatestCuratedPluginResponse); i { case 0: return &v.state case 1: @@ -2359,7 +2496,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GoConfig_RuntimeLibrary); i { + switch v := v.(*CuratedPluginVersionRevisions); i { case 0: return &v.state case 1: @@ -2371,6 +2508,18 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { } } file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GoConfig_RuntimeLibrary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*NPMConfig_RuntimeLibrary); i { case 0: return &v.state @@ -2382,10 +2531,23 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { return nil } } + file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MavenConfig_RuntimeLibrary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[2].OneofWrappers = []interface{}{ + file_buf_alpha_registry_v1alpha1_plugin_curation_proto_msgTypes[3].OneofWrappers = []interface{}{ (*RegistryConfig_GoConfig)(nil), (*RegistryConfig_NpmConfig)(nil), + (*RegistryConfig_MavenConfig)(nil), } type x struct{} out := protoimpl.TypeBuilder{ @@ -2393,7 +2555,7 @@ func file_buf_alpha_registry_v1alpha1_plugin_curation_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_buf_alpha_registry_v1alpha1_plugin_curation_proto_rawDesc, NumEnums: 3, - NumMessages: 20, + NumMessages: 22, NumExtensions: 0, NumServices: 2, }, diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/push.pb.go b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/push.pb.go index 40de4b886d..34adac9403 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/push.pb.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/push.pb.go @@ -35,6 +35,7 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// PushRequest specifies the module to push to the BSR. type PushRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -141,6 +142,7 @@ func (x *PushRequest) GetDraftName() string { return "" } +// PushResponse is the pushed module pin, local to the used remote. type PushResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -188,6 +190,151 @@ func (x *PushResponse) GetLocalModulePin() *LocalModulePin { return nil } +// PushManifestAndBlobsRequest holds the module to push in the manifest+blobs +// encoding format. +type PushManifestAndBlobsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` + Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` + // Manifest with all the module files being pushed. + Manifest *v1alpha1.Blob `protobuf:"bytes,3,opt,name=manifest,proto3" json:"manifest,omitempty"` + // Referenced blobs in the manifest. Keep in mind there is not necessarily one + // blob per file, but one blob per digest, so for files with exactly the same + // content, you can send just one blob. + Blobs []*v1alpha1.Blob `protobuf:"bytes,4,rep,name=blobs,proto3" json:"blobs,omitempty"` + // Optional; if provided, the provided tags + // are created for the pushed commit. + Tags []string `protobuf:"bytes,5,rep,name=tags,proto3" json:"tags,omitempty"` + // If non-empty, the push creates a draft commit with this name. 
+ DraftName string `protobuf:"bytes,6,opt,name=draft_name,json=draftName,proto3" json:"draft_name,omitempty"` +} + +func (x *PushManifestAndBlobsRequest) Reset() { + *x = PushManifestAndBlobsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_registry_v1alpha1_push_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushManifestAndBlobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushManifestAndBlobsRequest) ProtoMessage() {} + +func (x *PushManifestAndBlobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_registry_v1alpha1_push_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushManifestAndBlobsRequest.ProtoReflect.Descriptor instead. +func (*PushManifestAndBlobsRequest) Descriptor() ([]byte, []int) { + return file_buf_alpha_registry_v1alpha1_push_proto_rawDescGZIP(), []int{2} +} + +func (x *PushManifestAndBlobsRequest) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *PushManifestAndBlobsRequest) GetRepository() string { + if x != nil { + return x.Repository + } + return "" +} + +func (x *PushManifestAndBlobsRequest) GetManifest() *v1alpha1.Blob { + if x != nil { + return x.Manifest + } + return nil +} + +func (x *PushManifestAndBlobsRequest) GetBlobs() []*v1alpha1.Blob { + if x != nil { + return x.Blobs + } + return nil +} + +func (x *PushManifestAndBlobsRequest) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *PushManifestAndBlobsRequest) GetDraftName() string { + if x != nil { + return x.DraftName + } + return "" +} + +// PushManifestAndBlobsResponse is the pushed module pin, local to the used +// remote. +type PushManifestAndBlobsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LocalModulePin *LocalModulePin `protobuf:"bytes,1,opt,name=local_module_pin,json=localModulePin,proto3" json:"local_module_pin,omitempty"` +} + +func (x *PushManifestAndBlobsResponse) Reset() { + *x = PushManifestAndBlobsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_buf_alpha_registry_v1alpha1_push_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushManifestAndBlobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushManifestAndBlobsResponse) ProtoMessage() {} + +func (x *PushManifestAndBlobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_buf_alpha_registry_v1alpha1_push_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushManifestAndBlobsResponse.ProtoReflect.Descriptor instead. 
+func (*PushManifestAndBlobsResponse) Descriptor() ([]byte, []int) { + return file_buf_alpha_registry_v1alpha1_push_proto_rawDescGZIP(), []int{3} +} + +func (x *PushManifestAndBlobsResponse) GetLocalModulePin() *LocalModulePin { + if x != nil { + return x.LocalModulePin + } + return nil +} + var File_buf_alpha_registry_v1alpha1_push_proto protoreflect.FileDescriptor var file_buf_alpha_registry_v1alpha1_push_proto_rawDesc = []byte{ @@ -221,32 +368,64 @@ var file_buf_alpha_registry_v1alpha1_push_proto_rawDesc = []byte{ 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x52, 0x0e, 0x6c, 0x6f, 0x63, 0x61, - 0x6c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x32, 0x6a, 0x0a, 0x0b, 0x50, 0x75, - 0x73, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x04, 0x50, 0x75, 0x73, - 0x68, 0x12, 0x28, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, + 0x6c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x22, 0xfa, 0x01, 0x0a, 0x1b, 0x50, + 0x75, 0x73, 0x68, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, 0x6c, + 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, + 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, + 0x6c, 0x6f, 0x62, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, + 0x05, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x62, + 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x05, 0x62, + 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x66, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x72, + 0x61, 0x66, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x75, 0x0a, 0x1c, 0x50, 0x75, 0x73, 0x68, 0x4d, + 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x62, 0x75, - 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, - 
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x96, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, - 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x50, 0x75, 0x73, 0x68, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, 0x66, - 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, - 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, 0x41, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, - 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, - 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x52, 0x0e, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4d, 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x50, 0x69, 0x6e, 0x32, 0xf8, + 0x01, 0x0a, 0x0b, 0x50, 0x75, 0x73, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5b, + 0x0a, 0x04, 0x50, 0x75, 0x73, 0x68, 0x12, 0x28, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, + 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, + 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x14, + 0x50, 0x75, 0x73, 0x68, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, + 0x6c, 0x6f, 0x62, 0x73, 0x12, 0x38, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, + 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, + 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x75, 0x73, + 0x68, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x41, 0x6e, 0x64, 0x42, 0x6c, 0x6f, 0x62, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x96, 0x02, 0x0a, 
0x1f, 0x63, 0x6f, + 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x50, + 0x75, 0x73, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, + 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, + 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, + 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, + 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, + 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -261,23 +440,31 @@ func file_buf_alpha_registry_v1alpha1_push_proto_rawDescGZIP() []byte { return file_buf_alpha_registry_v1alpha1_push_proto_rawDescData } -var file_buf_alpha_registry_v1alpha1_push_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_buf_alpha_registry_v1alpha1_push_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_buf_alpha_registry_v1alpha1_push_proto_goTypes = []interface{}{ - (*PushRequest)(nil), // 0: buf.alpha.registry.v1alpha1.PushRequest - (*PushResponse)(nil), // 1: buf.alpha.registry.v1alpha1.PushResponse - (*v1alpha1.Module)(nil), // 2: buf.alpha.module.v1alpha1.Module - (*LocalModulePin)(nil), // 3: buf.alpha.registry.v1alpha1.LocalModulePin + (*PushRequest)(nil), // 0: buf.alpha.registry.v1alpha1.PushRequest + (*PushResponse)(nil), // 1: buf.alpha.registry.v1alpha1.PushResponse + (*PushManifestAndBlobsRequest)(nil), // 2: buf.alpha.registry.v1alpha1.PushManifestAndBlobsRequest + (*PushManifestAndBlobsResponse)(nil), // 3: buf.alpha.registry.v1alpha1.PushManifestAndBlobsResponse + (*v1alpha1.Module)(nil), // 4: buf.alpha.module.v1alpha1.Module + (*LocalModulePin)(nil), // 5: buf.alpha.registry.v1alpha1.LocalModulePin + (*v1alpha1.Blob)(nil), // 6: buf.alpha.module.v1alpha1.Blob } var file_buf_alpha_registry_v1alpha1_push_proto_depIdxs = []int32{ - 2, // 0: buf.alpha.registry.v1alpha1.PushRequest.module:type_name -> buf.alpha.module.v1alpha1.Module - 3, // 1: buf.alpha.registry.v1alpha1.PushResponse.local_module_pin:type_name -> buf.alpha.registry.v1alpha1.LocalModulePin - 0, // 2: buf.alpha.registry.v1alpha1.PushService.Push:input_type -> buf.alpha.registry.v1alpha1.PushRequest - 1, // 3: buf.alpha.registry.v1alpha1.PushService.Push:output_type -> buf.alpha.registry.v1alpha1.PushResponse - 3, // [3:4] is the sub-list for 
method output_type - 2, // [2:3] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 4, // 0: buf.alpha.registry.v1alpha1.PushRequest.module:type_name -> buf.alpha.module.v1alpha1.Module + 5, // 1: buf.alpha.registry.v1alpha1.PushResponse.local_module_pin:type_name -> buf.alpha.registry.v1alpha1.LocalModulePin + 6, // 2: buf.alpha.registry.v1alpha1.PushManifestAndBlobsRequest.manifest:type_name -> buf.alpha.module.v1alpha1.Blob + 6, // 3: buf.alpha.registry.v1alpha1.PushManifestAndBlobsRequest.blobs:type_name -> buf.alpha.module.v1alpha1.Blob + 5, // 4: buf.alpha.registry.v1alpha1.PushManifestAndBlobsResponse.local_module_pin:type_name -> buf.alpha.registry.v1alpha1.LocalModulePin + 0, // 5: buf.alpha.registry.v1alpha1.PushService.Push:input_type -> buf.alpha.registry.v1alpha1.PushRequest + 2, // 6: buf.alpha.registry.v1alpha1.PushService.PushManifestAndBlobs:input_type -> buf.alpha.registry.v1alpha1.PushManifestAndBlobsRequest + 1, // 7: buf.alpha.registry.v1alpha1.PushService.Push:output_type -> buf.alpha.registry.v1alpha1.PushResponse + 3, // 8: buf.alpha.registry.v1alpha1.PushService.PushManifestAndBlobs:output_type -> buf.alpha.registry.v1alpha1.PushManifestAndBlobsResponse + 7, // [7:9] is the sub-list for method output_type + 5, // [5:7] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name } func init() { file_buf_alpha_registry_v1alpha1_push_proto_init() } @@ -311,6 +498,30 @@ func file_buf_alpha_registry_v1alpha1_push_proto_init() { return nil } } + file_buf_alpha_registry_v1alpha1_push_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushManifestAndBlobsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_buf_alpha_registry_v1alpha1_push_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushManifestAndBlobsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -318,7 +529,7 @@ func file_buf_alpha_registry_v1alpha1_push_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_buf_alpha_registry_v1alpha1_push_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 4, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/user.pb.go b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/user.pb.go index f88d96fe6c..f7b3fa9b21 100644 --- a/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/user.pb.go +++ b/vendor/github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1/user.pb.go @@ -90,6 +90,7 @@ const ( UserType_USER_TYPE_UNSPECIFIED UserType = 0 UserType_USER_TYPE_PERSONAL UserType = 1 UserType_USER_TYPE_MACHINE UserType = 2 + UserType_USER_TYPE_SYSTEM UserType = 3 ) // Enum value maps for UserType. 
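The hunk above adds a fourth UserType constant, USER_TYPE_SYSTEM = 3, and the hunk below registers it in the generated UserType_name / UserType_value lookup maps. A minimal sketch of how those maps round-trip the new value, assuming only identifiers visible in this diff (the import path mirrors the vendored package path in the file headers above):

package example

import (
	registryv1alpha1 "github.com/bufbuild/buf/private/gen/proto/go/buf/alpha/registry/v1alpha1"
)

// nameOfUserType resolves an enum number through the generated
// UserType_name map; the new value 3 resolves to "USER_TYPE_SYSTEM".
func nameOfUserType(t registryv1alpha1.UserType) string {
	return registryv1alpha1.UserType_name[int32(t)]
}

// numberOfUserType does the inverse lookup through UserType_value,
// e.g. numberOfUserType("USER_TYPE_SYSTEM") == 3.
func numberOfUserType(name string) int32 {
	return registryv1alpha1.UserType_value[name]
}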
@@ -98,11 +99,13 @@ var ( 0: "USER_TYPE_UNSPECIFIED", 1: "USER_TYPE_PERSONAL", 2: "USER_TYPE_MACHINE", + 3: "USER_TYPE_SYSTEM", } UserType_value = map[string]int32{ "USER_TYPE_UNSPECIFIED": 0, "USER_TYPE_PERSONAL": 1, "USER_TYPE_MACHINE": 2, + "USER_TYPE_SYSTEM": 3, } ) @@ -1503,108 +1506,109 @@ var file_buf_alpha_registry_v1alpha1_user_proto_rawDesc = []byte{ 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x41, 0x43, 0x54, 0x49, 0x56, 0x41, - 0x54, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x54, 0x0a, 0x08, 0x55, 0x73, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x54, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x6a, 0x0a, 0x08, 0x55, 0x73, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x53, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x48, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x32, 0xd3, 0x09, 0x0a, 0x0b, - 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x6d, 0x0a, 0x0a, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x07, 0x47, 0x65, - 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x82, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x42, 0x79, 0x55, 0x73, - 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x42, 0x79, 0x55, 0x73, - 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, - 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, - 0x73, 0x65, 0x72, 0x42, 0x79, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, 0x09, 0x4c, 0x69, 
0x73, 0x74, 0x55, 0x73, 0x65, - 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, - 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x39, 0x2e, 0x62, 0x75, + 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x48, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x55, + 0x53, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x10, + 0x03, 0x32, 0xd3, 0x09, 0x0a, 0x0b, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x6d, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, + 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x64, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x72, - 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, - 0x12, 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x79, 0x0a, 0x0e, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x12, 0x32, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 
0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x82, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x55, 0x73, + 0x65, 0x72, 0x42, 0x79, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x2e, 0x62, + 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, + 0x65, 0x72, 0x42, 0x79, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, + 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x42, 0x79, 0x55, 0x73, 0x65, 0x72, 0x6e, + 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, 0x09, 0x4c, + 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8b, 0x01, 0x0a, - 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x38, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x73, 0x65, 0x72, + 0x73, 0x12, 0x39, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x62, + 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, + 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x73, 0x65, 0x72, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x0e, 0x44, 0x65, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x32, 0x2e, 0x62, 0x75, 0x66, 0x2e, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, + 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x8b, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, + 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x38, 0x2e, 0x62, 0x75, + 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x39, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x0a, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x73, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x73, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, 0x0a, 0x12, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, - 0x12, 0x36, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x37, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, - 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x96, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x55, 0x73, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, - 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, - 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, - 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, - 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, - 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, - 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x72, 0x76, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x6d, 0x0a, 0x0a, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x2e, + 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, + 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x85, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x36, 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, + 0x2e, 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 
0x61, 0x31, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x96, 0x02, 0x0a, 0x1f, 0x63, 0x6f, 0x6d, 0x2e, + 0x62, 0x75, 0x66, 0x2e, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x79, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x55, 0x73, 0x65, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x59, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x62, 0x75, 0x66, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x2f, 0x62, 0x75, + 0x66, 0x2f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x2f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x3b, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0xa2, 0x02, 0x03, 0x42, 0x41, 0x52, 0xaa, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x2e, + 0x41, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x56, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x1b, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, + 0x70, 0x68, 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0xe2, 0x02, 0x27, 0x42, 0x75, 0x66, 0x5c, 0x41, 0x6c, 0x70, 0x68, + 0x61, 0x5c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x5c, 0x56, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x1e, 0x42, 0x75, 0x66, 0x3a, 0x3a, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x3a, 0x3a, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x3a, 0x3a, 0x56, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/app.go b/vendor/github.com/bufbuild/buf/private/pkg/app/app.go index ba2470a0d3..23ab9f8299 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/app.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/app.go @@ -26,7 +26,7 @@ import ( "github.com/bufbuild/buf/private/pkg/interrupt" ) -// EnvContainer provides envionment variables. +// EnvContainer provides environment variables. type EnvContainer interface { // Env gets the environment variable value for the key. // @@ -221,7 +221,7 @@ type EnvStdioContainer interface { // Environ returns all environment variables in the form "KEY=VALUE". // -// Equivalent to os.Enviorn. +// Equivalent to os.Environ. // // Sorted. 
func Environ(envContainer EnvContainer) []string { diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appcmd/appcmd.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appcmd/appcmd.go index 87de084f80..b921c900af 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appcmd/appcmd.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appcmd/appcmd.go @@ -137,11 +137,11 @@ func run( container, &Command{ Use: "completion", - Short: "Generate auto-completion scripts for commonly used shells.", + Short: "Generate auto-completion scripts for commonly used shells", SubCommands: []*Command{ { Use: "bash", - Short: "Generate auto-completion scripts for bash.", + Short: "Generate auto-completion scripts for bash", Args: cobra.NoArgs, Run: func(ctx context.Context, container app.Container) error { return cobraCommand.GenBashCompletion(container.Stdout()) @@ -149,7 +149,7 @@ func run( }, { Use: "fish", - Short: "Generate auto-completion scripts for fish.", + Short: "Generate auto-completion scripts for fish", Args: cobra.NoArgs, Run: func(ctx context.Context, container app.Container) error { return cobraCommand.GenFishCompletion(container.Stdout(), true) @@ -157,7 +157,7 @@ func run( }, { Use: "powershell", - Short: "Generate auto-completion scripts for powershell.", + Short: "Generate auto-completion scripts for powershell", Args: cobra.NoArgs, Run: func(ctx context.Context, container app.Container) error { return cobraCommand.GenPowerShellCompletion(container.Stdout()) @@ -165,7 +165,7 @@ func run( }, { Use: "zsh", - Short: "Generate auto-completion scripts for zsh.", + Short: "Generate auto-completion scripts for zsh", Args: cobra.NoArgs, Run: func(ctx context.Context, container app.Container) error { return cobraCommand.GenZshCompletion(container.Stdout()) @@ -320,7 +320,7 @@ func commandToCobra( &doVersion, "version", false, - "Print the version.", + "Print the version", ) cobraCommand.Run = func(cmd *cobra.Command, args []string) { if doVersion { diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appflag/builder.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appflag/builder.go index 753bed1d7c..2fb18de53d 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appflag/builder.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appflag/builder.go @@ -23,11 +23,10 @@ import ( "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/app/applog" "github.com/bufbuild/buf/private/pkg/app/appverbose" - "github.com/bufbuild/buf/private/pkg/observability" - "github.com/bufbuild/buf/private/pkg/observability/observabilityzap" + "github.com/bufbuild/buf/private/pkg/observabilityzap" "github.com/pkg/profile" "github.com/spf13/pflag" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -64,26 +63,26 @@ func newBuilder(appName string, options ...BuilderOption) *builder { } func (b *builder) BindRoot(flagSet *pflag.FlagSet) { - flagSet.BoolVarP(&b.verbose, "verbose", "v", false, "Turn on verbose mode.") - flagSet.BoolVar(&b.debug, "debug", false, "Turn on debug logging.") - flagSet.StringVar(&b.logFormat, "log-format", "color", "The log format [text,color,json].") + flagSet.BoolVarP(&b.verbose, "verbose", "v", false, "Turn on verbose mode") + flagSet.BoolVar(&b.debug, "debug", false, "Turn on debug logging") + flagSet.StringVar(&b.logFormat, "log-format", "color", "The log format [text,color,json]") if b.defaultTimeout > 0 { - flagSet.DurationVar(&b.timeout, "timeout", b.defaultTimeout, `The duration 
until timing out.`) + flagSet.DurationVar(&b.timeout, "timeout", b.defaultTimeout, `The duration until timing out`) } - flagSet.BoolVar(&b.profile, "profile", false, "Run profiling.") + flagSet.BoolVar(&b.profile, "profile", false, "Run profiling") _ = flagSet.MarkHidden("profile") - flagSet.StringVar(&b.profilePath, "profile-path", "", "The profile base directory path.") + flagSet.StringVar(&b.profilePath, "profile-path", "", "The profile base directory path") _ = flagSet.MarkHidden("profile-path") - flagSet.IntVar(&b.profileLoops, "profile-loops", 1, "The number of loops to run.") + flagSet.IntVar(&b.profileLoops, "profile-loops", 1, "The number of loops to run") _ = flagSet.MarkHidden("profile-loops") - flagSet.StringVar(&b.profileType, "profile-type", "cpu", "The profile type [cpu,mem,block,mutex].") + flagSet.StringVar(&b.profileType, "profile-type", "cpu", "The profile type [cpu,mem,block,mutex]") _ = flagSet.MarkHidden("profile-type") - flagSet.BoolVar(&b.profileAllowError, "profile-allow-error", false, "Allow errors for profiled commands.") + flagSet.BoolVar(&b.profileAllowError, "profile-allow-error", false, "Allow errors for profiled commands") _ = flagSet.MarkHidden("profile-allow-error") // We do not officially support this flag, this is for testing, where we need warnings turned off. - flagSet.BoolVar(&b.noWarn, "no-warn", false, "Turn off warn logging.") + flagSet.BoolVar(&b.noWarn, "no-warn", false, "Turn off warn logging") _ = flagSet.MarkHidden("no-warn") } @@ -129,16 +128,11 @@ func (b *builder) run( } if b.tracing { - closer := observability.Start( - observability.StartWithTraceExportCloser( - observabilityzap.NewTraceExportCloser(logger), - ), - ) + closer := observabilityzap.Start(logger) defer func() { retErr = multierr.Append(retErr, closer.Close()) }() - var span *trace.Span - ctx, span = trace.StartSpan(ctx, "command") + _, span := otel.GetTracerProvider().Tracer("bufbuild/buf").Start(ctx, "command") defer span.End() } if !b.profile { diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appproto.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appproto.go index 5bec1c6951..ef1435dd2c 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appproto.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appproto.go @@ -49,7 +49,7 @@ const ( // ResponseBuilder builds CodeGeneratorResponses. type ResponseBuilder interface { - // Add adds the file to the response. + // AddFile adds the file to the response. // // Returns error if nil or the name is empty. // Warns to stderr if the name is already added or the name is not normalized. diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/appprotoexec.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/appprotoexec.go index 3de0d32443..2a8a8e431c 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/appprotoexec.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/appprotoexec.go @@ -40,7 +40,7 @@ const ( // DefaultMinorVersion is the default minor version. defaultMinorVersion = 21 // DefaultPatchVersion is the default patch version. - defaultPatchVersion = 7 + defaultPatchVersion = 12 // DefaultSuffixVersion is the default suffix version. defaultSuffixVersion = "" ) @@ -97,14 +97,23 @@ func NewGenerator( // GenerateOption is an option for Generate. 
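As a usage sketch of the variadic plugin-path options introduced below: the first path element is the plugin binary and the rest are extra arguments. The plugin name and the flag here are invented for illustration, and storageosProvider/runner are assumed to be in scope.

	handler, err := appprotoexec.NewHandler(
		storageosProvider, // assumed storageos.Provider
		runner,            // assumed command.Runner
		"go",              // resolves protoc-gen-go from $PATH unless a path is given
		appprotoexec.HandlerWithPluginPath("protoc-gen-go", "--hypothetical-flag"),
	)
	if err != nil {
		// handle the lookup error
	}
	_ = handler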
type GenerateOption func(*generateOptions) -// GenerateWithPluginPath returns a new GenerateOption that uses the given -// path to the plugin. -func GenerateWithPluginPath(pluginPath string) GenerateOption { +// GenerateWithPluginPath returns a new GenerateOption that uses the given path to the plugin. +// If the path has more than one element, the first is the plugin binary and the others are +// optional additional arguments to pass to the binary. +func GenerateWithPluginPath(pluginPath ...string) GenerateOption { return func(generateOptions *generateOptions) { generateOptions.pluginPath = pluginPath } } +// GenerateWithProtocPath returns a new GenerateOption that uses the given path +// to protoc. +func GenerateWithProtocPath(protocPath string) GenerateOption { + return func(generateOptions *generateOptions) { + generateOptions.protocPath = protocPath + } +} + // NewHandler returns a new Handler based on the plugin name and optional path. // // protocPath and pluginPath are optional. @@ -115,7 +124,6 @@ func GenerateWithPluginPath(pluginPath string) GenerateOption { // - Else, if the name is in ProtocProxyPluginNames, this returns a new protoc proxy handler. // - Else, this returns an error. func NewHandler( - logger *zap.Logger, storageosProvider storageos.Provider, runner command.Runner, pluginName string, @@ -125,16 +133,16 @@ func NewHandler( for _, option := range options { option(handlerOptions) } - if handlerOptions.pluginPath != "" { - pluginPath, err := unsafeLookPath(handlerOptions.pluginPath) + if len(handlerOptions.pluginPath) > 0 { + pluginPath, err := unsafeLookPath(handlerOptions.pluginPath[0]) if err != nil { return nil, err } - return newBinaryHandler(logger, runner, pluginPath), nil + return newBinaryHandler(runner, pluginPath, handlerOptions.pluginPath[1:]), nil } pluginPath, err := unsafeLookPath("protoc-gen-" + pluginName) if err == nil { - return newBinaryHandler(logger, runner, pluginPath), nil + return newBinaryHandler(runner, pluginPath, nil), nil } // we always look for protoc-gen-X first, but if not, check the builtins if _, ok := ProtocProxyPluginNames[pluginName]; ok { @@ -145,7 +153,7 @@ func NewHandler( if err != nil { return nil, err } - return newProtocProxyHandler(logger, storageosProvider, runner, protocPath, pluginName), nil + return newProtocProxyHandler(storageosProvider, runner, protocPath, pluginName), nil } return nil, fmt.Errorf( "could not find protoc plugin for name %s - please make sure protoc-gen-%s is installed and present on your $PATH", @@ -160,6 +168,7 @@ type HandlerOption func(*handlerOptions) // HandlerWithProtocPath returns a new HandlerOption that sets the path to the protoc binary. // // The default is to do exec.LookPath on "protoc". +// protocPath is expected to be unnormalized. func HandlerWithProtocPath(protocPath string) HandlerOption { return func(handlerOptions *handlerOptions) { handlerOptions.protocPath = protocPath @@ -168,16 +177,30 @@ func HandlerWithProtocPath(protocPath string) HandlerOption { // HandlerWithPluginPath returns a new HandlerOption that sets the path to the plugin binary. // -// The default is to do exec.LookPath on "protoc-gen-" + pluginName. -func HandlerWithPluginPath(pluginPath string) HandlerOption { +// The default is to do exec.LookPath on "protoc-gen-" + pluginName. pluginPath is expected +// to be unnormalized.
If the path has more than one element, the first is the plugin binary +// and the others are optional additional arguments to pass to the binary. +func HandlerWithPluginPath(pluginPath ...string) HandlerOption { return func(handlerOptions *handlerOptions) { handlerOptions.pluginPath = pluginPath } } +// NewBinaryHandler returns a new Handler that invokes the specific plugin +// specified by pluginPath. +// +// Used by other repositories. +func NewBinaryHandler(runner command.Runner, pluginPath string, pluginArgs []string) (appproto.Handler, error) { + pluginPath, err := unsafeLookPath(pluginPath) + if err != nil { + return nil, err + } + return newBinaryHandler(runner, pluginPath, pluginArgs), nil +} + type handlerOptions struct { protocPath string - pluginPath string + pluginPath []string } func newHandlerOptions() *handlerOptions { diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/binary_handler.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/binary_handler.go index 5131306878..febb0807b3 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/binary_handler.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/binary_handler.go @@ -17,32 +17,38 @@ package appprotoexec import ( "bytes" "context" + "io" "path/filepath" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/app/appproto" "github.com/bufbuild/buf/private/pkg/command" + "github.com/bufbuild/buf/private/pkg/ioextended" "github.com/bufbuild/buf/private/pkg/protoencoding" - "go.opencensus.io/trace" - "go.uber.org/zap" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "google.golang.org/protobuf/types/pluginpb" ) type binaryHandler struct { - logger *zap.Logger runner command.Runner pluginPath string + tracer trace.Tracer + pluginArgs []string } func newBinaryHandler( - logger *zap.Logger, runner command.Runner, pluginPath string, + pluginArgs []string, ) *binaryHandler { return &binaryHandler{ - logger: logger.Named("appprotoexec"), runner: runner, pluginPath: pluginPath, + tracer: otel.GetTracerProvider().Tracer("bufbuild/buf"), + pluginArgs: pluginArgs, } } @@ -52,31 +58,46 @@ func (h *binaryHandler) Handle( responseWriter appproto.ResponseBuilder, request *pluginpb.CodeGeneratorRequest, ) error { - ctx, span := trace.StartSpan(ctx, "plugin_proxy") - span.AddAttributes(trace.StringAttribute("plugin", filepath.Base(h.pluginPath))) + ctx, span := h.tracer.Start(ctx, "plugin_proxy", trace.WithAttributes( + attribute.Key("plugin").String(filepath.Base(h.pluginPath)), + )) defer span.End() requestData, err := protoencoding.NewWireMarshaler().Marshal(request) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return err } responseBuffer := bytes.NewBuffer(nil) - if err := h.runner.Run( - ctx, - h.pluginPath, + stderrWriteCloser := newStderrWriteCloser(container.Stderr(), h.pluginPath) + runOptions := []command.RunOption{ command.RunWithEnv(app.EnvironMap(container)), command.RunWithStdin(bytes.NewReader(requestData)), command.RunWithStdout(responseBuffer), - command.RunWithStderr(container.Stderr()), + command.RunWithStderr(stderrWriteCloser), + } + if len(h.pluginArgs) > 0 { + runOptions = append(runOptions, command.RunWithArgs(h.pluginArgs...)) + } + if err := h.runner.Run( + ctx, + h.pluginPath, + runOptions..., ); err != nil { - // TODO: strip binary path as well?
- return handlePotentialTooManyFilesError(err) + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err } response := &pluginpb.CodeGeneratorResponse{} if err := protoencoding.NewWireUnmarshaler(nil).Unmarshal(responseBuffer.Bytes(), response); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return err } response, err = normalizeCodeGeneratorResponse(response) if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return err } if response.GetSupportedFeatures()&uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) != 0 { @@ -84,6 +105,8 @@ func (h *binaryHandler) Handle( } for _, file := range response.File { if err := responseWriter.AddFile(file); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return err } } @@ -95,3 +118,16 @@ func (h *binaryHandler) Handle( } return nil } + +func newStderrWriteCloser(delegate io.Writer, pluginPath string) io.WriteCloser { + switch filepath.Base(pluginPath) { + case "protoc-gen-swift": + // https://github.com/bufbuild/buf/issues/1736 + // Swallow a specific stderr message from protoc-gen-swift; see the issue above. + // This is all disgusting code but it's simple and it works. + // We did not document if pluginPath is normalized or not, so we match on the base name here. + return newProtocGenSwiftStderrWriteCloser(delegate) + default: + return ioextended.NopWriteCloser(delegate) + } +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/generator.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/generator.go index b9d09b81f3..4df7bafd33 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/generator.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/generator.go @@ -55,11 +55,11 @@ func (g *generator) Generate( option(generateOptions) } handler, err := NewHandler( - g.logger, g.storageosProvider, g.runner, pluginName, - HandlerWithPluginPath(generateOptions.pluginPath), + HandlerWithPluginPath(generateOptions.pluginPath...), + HandlerWithProtocPath(generateOptions.protocPath), ) if err != nil { return nil, err @@ -75,7 +75,8 @@ func (g *generator) Generate( } type generateOptions struct { - pluginPath string + pluginPath []string + protocPath string } func newGenerateOptions() *generateOptions { diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_gen_swift_stderr_write_closer.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_gen_swift_stderr_write_closer.go new file mode 100644 index 0000000000..dc9811fba5 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_gen_swift_stderr_write_closer.go @@ -0,0 +1,73 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package appprotoexec + +import ( + "bytes" + "errors" + "io" +) + +type protocGenSwiftStderrWriteCloser struct { + delegate io.Writer + buffer *bytes.Buffer +} + +func newProtocGenSwiftStderrWriteCloser(delegate io.Writer) io.WriteCloser { + return &protocGenSwiftStderrWriteCloser{ + delegate: delegate, + buffer: bytes.NewBuffer(nil), + } +} + +func (p *protocGenSwiftStderrWriteCloser) Write(data []byte) (int, error) { + // If protoc-gen-swift, we want to capture all the stderr so we can process it. + return p.buffer.Write(data) +} + +func (p *protocGenSwiftStderrWriteCloser) Close() error { + data := p.buffer.Bytes() + if len(data) == 0 { + return nil + } + newData := bytes.ReplaceAll( + data, + // If swift-protobuf changes their error message, this may no longer filter properly + // but this is OK - this filtering should be treated as non-critical. + // https://github.com/apple/swift-protobuf/blob/c3d060478fcf1f564be0a3876bde8c04247793ae/Sources/protoc-gen-swift/main.swift#L244 + // + // Note that our heuristic for whether this is protoc-gen-swift is that the binary + // is named protoc-gen-swift, and protoc-gen-swift will print the binary name + // before any message to stderr, so given that heuristic, this is the + // error message that will be printed. + // https://github.com/apple/swift-protobuf/blob/c3d060478fcf1f564be0a3876bde8c04247793ae/Sources/protoc-gen-swift/FileIo.swift#L19 + // + // Tested manually on Mac. + // TODO: Test manually on Windows. + []byte("protoc-gen-swift: WARNING: unknown version of protoc, use 3.2.x or later to ensure JSON support is correct.\n"), + nil, + ) + if len(newData) == 0 { + return nil + } + n, err := p.delegate.Write(newData) + if err != nil { + return err + } + if n != len(newData) { + return errors.New("incomplete write") + } + return nil +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_proxy_handler.go b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_proxy_handler.go index 87917bb61f..0f659308d6 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_proxy_handler.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/app/appproto/appprotoexec/protoc_proxy_handler.go @@ -30,35 +30,36 @@ import ( "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storageos" "github.com/bufbuild/buf/private/pkg/tmp" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" - "go.uber.org/zap" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/descriptorpb" "google.golang.org/protobuf/types/pluginpb" ) type protocProxyHandler struct { - logger *zap.Logger storageosProvider storageos.Provider runner command.Runner protocPath string pluginName string + tracer trace.Tracer } func newProtocProxyHandler( - logger *zap.Logger, storageosProvider storageos.Provider, runner command.Runner, protocPath string, pluginName string, ) *protocProxyHandler { return &protocProxyHandler{ - logger: logger.Named("appprotoexec"), storageosProvider: storageosProvider, runner: runner, protocPath: protocPath, pluginName: pluginName, + tracer: otel.GetTracerProvider().Tracer("bufbuild/buf"), } } @@ -68,9 +69,16 @@ func (h *protocProxyHandler) Handle( responseWriter appproto.ResponseBuilder, request *pluginpb.CodeGeneratorRequest, ) (retErr error) { - ctx,
span := trace.StartSpan(ctx, "protoc_proxy") - span.AddAttributes(trace.StringAttribute("plugin", filepath.Base(h.pluginName))) + ctx, span := h.tracer.Start(ctx, "protoc_proxy", trace.WithAttributes( + attribute.Key("plugin").String(filepath.Base(h.pluginName)), + )) defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() protocVersion, err := h.getProtocVersion(ctx, container) if err != nil { return err diff --git a/vendor/github.com/bufbuild/buf/private/pkg/encoding/encoding.go b/vendor/github.com/bufbuild/buf/private/pkg/encoding/encoding.go index 62ede907be..c09a8368c2 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/encoding/encoding.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/encoding/encoding.go @@ -169,24 +169,34 @@ func NewYAMLDecoderNonStrict(reader io.Reader) *yaml.Decoder { // used with JSON or YAML fields that need to support both string slices // and string literals. func InterfaceSliceOrStringToCommaSepString(in interface{}) (string, error) { - var opt string + values, err := InterfaceSliceOrStringToStringSlice(in) + if err != nil { + return "", err + } + return strings.Join(values, ","), nil +} + +func InterfaceSliceOrStringToStringSlice(in interface{}) ([]string, error) { + if in == nil { + return nil, nil + } switch t := in.(type) { case string: - opt = t + return []string{t}, nil case []interface{}: - opts := make([]string, len(t)) + if len(t) == 0 { + return nil, nil + } + res := make([]string, len(t)) for i, elem := range t { s, ok := elem.(string) if !ok { - return "", fmt.Errorf("could not convert element %T to a string", elem) + return nil, fmt.Errorf("could not convert element %T to a string", elem) } - opts[i] = s + res[i] = s } - opt = strings.Join(opts, ",") - case nil: - // If the variable is omitted, the value is nil + return res, nil default: - return "", fmt.Errorf("unknown type %T for opt", t) + return nil, fmt.Errorf("could not interpret %T as string or string slice", in) } - return opt, nil } diff --git a/vendor/github.com/bufbuild/buf/private/pkg/git/cloner.go b/vendor/github.com/bufbuild/buf/private/pkg/git/cloner.go index ac6611004a..ae279540df 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/git/cloner.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/git/cloner.go @@ -27,23 +27,29 @@ import ( "github.com/bufbuild/buf/private/pkg/storage" "github.com/bufbuild/buf/private/pkg/storage/storageos" "github.com/bufbuild/buf/private/pkg/tmp" - "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/multierr" "go.uber.org/zap" ) -// bufCloneOrigin is the name for the remote. It helps distinguish the origin of -// the repo we're cloning from the "origin" of our clone (which is the repo -// being cloned). -// We can fetch directly from an origin URL, but without any remote set git LFS -// will fail to fetch so we need to pick something. -const bufCloneOrigin = "bufCloneOrigin" +const ( + // bufCloneOrigin is the name for the remote. It helps distinguish the origin of + // the repo we're cloning from the "origin" of our clone (which is the repo + // being cloned). + // We can fetch directly from an origin URL, but without any remote set git LFS + // will fail to fetch so we need to pick something. 
+ bufCloneOrigin = "bufCloneOrigin" + tracerName = "bufbuild/buf/cloner" +) type cloner struct { logger *zap.Logger storageosProvider storageos.Provider runner command.Runner options ClonerOptions + tracer trace.Tracer } func newCloner( @@ -57,6 +63,7 @@ func newCloner( storageosProvider: storageosProvider, runner: runner, options: options, + tracer: otel.GetTracerProvider().Tracer(tracerName), } } @@ -68,8 +75,14 @@ func (c *cloner) CloneToBucket( writeBucket storage.WriteBucket, options CloneToBucketOptions, ) (retErr error) { - ctx, span := trace.StartSpan(ctx, "git_clone_to_bucket") + ctx, span := c.tracer.Start(ctx, "git_clone_to_bucket") defer span.End() + defer func() { + if retErr != nil { + span.RecordError(retErr) + span.SetStatus(codes.Error, retErr.Error()) + } + }() var err error switch { @@ -83,13 +96,18 @@ func (c *cloner) CloneToBucket( } if depth == 0 { - return errors.New("depth must be > 0") + err := errors.New("depth must be > 0") + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + return err } depthArg := strconv.Itoa(int(depth)) bareDir, err := tmp.NewDir() if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) return err } defer func() { @@ -241,10 +259,14 @@ func (c *cloner) CloneToBucket( if options.Mapper != nil { readBucket = storage.MapReadBucket(readBucket, options.Mapper) } - ctx, span2 := trace.StartSpan(ctx, "git_clone_to_bucket_copy") + ctx, span2 := c.tracer.Start(ctx, "git_clone_to_bucket_copy") defer span2.End() // do NOT copy external paths _, err = storage.Copy(ctx, readBucket, writeBucket) + if err != nil { + span2.RecordError(err) + span2.SetStatus(codes.Error, err.Error()) + } return err } diff --git a/vendor/github.com/bufbuild/buf/private/pkg/git/git.go b/vendor/github.com/bufbuild/buf/private/pkg/git/git.go index e80aea2067..e1c031cb2e 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/git/git.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/git/git.go @@ -16,6 +16,7 @@ package git import ( "context" + "regexp" "github.com/bufbuild/buf/private/pkg/app" "github.com/bufbuild/buf/private/pkg/command" @@ -92,3 +93,43 @@ type ClonerOptions struct { SSHKeyFileEnvKey string SSHKnownHostsFilesEnvKey string } + +// Lister lists files in git repositories. +type Lister interface { + // ListFilesAndUnstagedFiles lists all files checked into git except those that + // were deleted, and also lists unstaged files. + // + // This does not list unstaged deleted files. + // This does not list unignored files that were not added. + // This ignores non-regular files. + // + // This is used for situations like license headers where we want all the + // potential git files during development. + // + // The returned paths will be unnormalized. + // + // This is the equivalent of doing: + // + // comm -23 \ + // <(git ls-files --cached --modified --others --no-empty-directory --exclude-standard | sort -u | grep -v -e IGNORE_PATH1 -e IGNORE_PATH2) \ + // <(git ls-files --deleted | sort -u) + ListFilesAndUnstagedFiles( + ctx context.Context, + envContainer app.EnvStdioContainer, + options ListFilesAndUnstagedFilesOptions, + ) ([]string, error) +} + +// NewLister returns a new Lister. +func NewLister(runner command.Runner) Lister { + return newLister(runner) +} + +// ListFilesAndUnstagedFilesOptions are options for ListFilesAndUnstagedFiles. +type ListFilesAndUnstagedFilesOptions struct { + // IgnorePathRegexps are regexes of paths to ignore.
+ // + // These must be unnormalized in the manner of the local OS that the Lister + // is being applied to. + IgnorePathRegexps []*regexp.Regexp +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/git/lister.go b/vendor/github.com/bufbuild/buf/private/pkg/git/lister.go new file mode 100644 index 0000000000..bdcc02d82a --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/git/lister.go @@ -0,0 +1,131 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package git + +import ( + "context" + "os" + "regexp" + + "github.com/bufbuild/buf/private/pkg/app" + "github.com/bufbuild/buf/private/pkg/command" + "github.com/bufbuild/buf/private/pkg/stringutil" +) + +type lister struct { + runner command.Runner +} + +func newLister(runner command.Runner) *lister { + return &lister{ + runner: runner, + } +} + +func (l *lister) ListFilesAndUnstagedFiles( + ctx context.Context, + container app.EnvStdioContainer, + options ListFilesAndUnstagedFilesOptions, +) ([]string, error) { + allFilesOutput, err := command.RunStdout( + ctx, + container, + l.runner, + "git", + "ls-files", + "--cached", + "--modified", + "--others", + "--exclude-standard", + ) + if err != nil { + return nil, err + } + deletedFilesOutput, err := command.RunStdout( + ctx, + container, + l.runner, + "git", + "ls-files", + "--deleted", + ) + if err != nil { + return nil, err + } + return stringutil.SliceToUniqueSortedSlice( + filterNonRegularFiles( + stringSliceExceptMatches( + stringSliceExcept( + // This may not work in all Windows scenarios as we only split on "\n" but + // this is no worse than we previously had. + stringutil.SplitTrimLinesNoEmpty(string(allFilesOutput)), + stringutil.SplitTrimLinesNoEmpty(string(deletedFilesOutput)), + ), + options.IgnorePathRegexps, + ), + ), + ), nil +} + +// stringSliceExcept returns all elements in source that are not in except. +func stringSliceExcept(source []string, except []string) []string { + exceptMap := stringutil.SliceToMap(except) + result := make([]string, 0, len(source)) + for _, s := range source { + if _, ok := exceptMap[s]; !ok { + result = append(result, s) + } + } + return result +} + +// stringSliceExceptMatches returns all elements in source that do not match +// any of the regexps. +func stringSliceExceptMatches(source []string, regexps []*regexp.Regexp) []string { + if len(regexps) == 0 { + return source + } + result := make([]string, 0, len(source)) + for _, s := range source { + if !matchesAny(s, regexps) { + result = append(result, s) + } + } + return result +} + +// matchesAny returns true if any of regexps match. +func matchesAny(s string, regexps []*regexp.Regexp) bool { + for _, regexp := range regexps { + if regexp.MatchString(s) { + return true + } + } + return false +} + +// filterNonRegularFiles returns all regular files. +// +// This does an os.Stat call, so the files must exist for this to work. +// Given our usage here, this is true by the time this function is called. 
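A sketch of driving the Lister defined above; ctx, runner, and container are assumed to come from the surrounding command setup, and the vendor/ pattern is invented for illustration:

	lister := git.NewLister(runner)
	files, err := lister.ListFilesAndUnstagedFiles(ctx, container, git.ListFilesAndUnstagedFilesOptions{
		// Plays the role of the grep -v terms in the comm pipeline above.
		IgnorePathRegexps: []*regexp.Regexp{regexp.MustCompile(`^vendor/`)},
	})
	if err != nil {
		// handle the error
	}
	_ = files // unique, sorted, unnormalized, regular files only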
+func filterNonRegularFiles(files []string) []string { + filteredFiles := make([]string, 0, len(files)) + for _, file := range files { + if fileInfo, err := os.Stat(file); err == nil && fileInfo.Mode().IsRegular() { + filteredFiles = append(filteredFiles, file) + } + } + return filteredFiles +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/manifest/digest.go b/vendor/github.com/bufbuild/buf/private/pkg/manifest/digest.go new file mode 100644 index 0000000000..93099a9f80 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/manifest/digest.go @@ -0,0 +1,137 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manifest + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + + "golang.org/x/crypto/sha3" +) + +// DigestType is the type for digests in this package. +type DigestType string + +const ( + DigestTypeShake256 DigestType = "shake256" + + shake256Length = 64 +) + +// Digest represents a hash function's value. +type Digest struct { + dtype DigestType + digest []byte + hexstr string +} + +// NewDigestFromBytes builds a digest from a type and the digest bytes. +func NewDigestFromBytes(dtype DigestType, digest []byte) (*Digest, error) { + if dtype == "" { + return nil, errors.New("digest type cannot be empty") + } + if dtype != DigestTypeShake256 { + return nil, fmt.Errorf("unsupported digest type: %q", dtype) + } + if len(digest) != shake256Length { + return nil, fmt.Errorf( + "invalid digest: got %d bytes, expected %d bytes for type %q", + len(digest), shake256Length, dtype, + ) + } + return &Digest{ + dtype: dtype, + digest: digest, + hexstr: hex.EncodeToString(digest), + }, nil } + +// NewDigestFromHex builds a digest from a type and the hexadecimal string of +// the bytes. It returns an error if the received string is not valid hex. +func NewDigestFromHex(dtype DigestType, hexstr string) (*Digest, error) { + digest, err := hex.DecodeString(hexstr) + if err != nil { + return nil, err + } + return NewDigestFromBytes(dtype, digest) +} + +// NewDigestFromString builds a digest from a string representation of it. +func NewDigestFromString(typedDigest string) (*Digest, error) { + dtype, hexstr, found := strings.Cut(typedDigest, ":") + if !found { + return nil, errors.New("malformed digest string") + } + return NewDigestFromHex(DigestType(dtype), hexstr) +} + +// String returns the hash in a manifest's string format: "<type>:<hex>". +func (d *Digest) String() string { + return string(d.dtype) + ":" + d.hexstr +} + +// Type returns the digest type. +func (d *Digest) Type() DigestType { + return d.dtype +} + +// Bytes returns the digest bytes. +func (d *Digest) Bytes() []byte { + return d.digest +} + +// Hex returns the digest bytes in its hexadecimal string representation. +func (d *Digest) Hex() string { + return d.hexstr +} + +// Equal compares the digest type and bytes with other digest.
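For reference, a sketch of the "<type>:<hex>" round trip implemented above, using an invented all-zero digest:

	d, err := manifest.NewDigestFromString("shake256:" + strings.Repeat("00", 64))
	if err != nil {
		// handle the parse error
	}
	// d.Type() == manifest.DigestTypeShake256
	// len(d.Hex()) == 128: the hex form of the 64-byte SHAKE256 digest
	// d.String() reproduces the input exactly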
+func (d *Digest) Equal(other Digest) bool { + return d.dtype == other.dtype && bytes.Equal(d.digest, other.digest) +} + +// Digester is something that can digest content into a digest. +type Digester interface { + Digest(content io.Reader) (*Digest, error) +} + +type shake256Digester struct { + hash sha3.ShakeHash +} + +func NewDigester(dtype DigestType) (Digester, error) { + if dtype != DigestTypeShake256 { + return nil, fmt.Errorf("not supported digest type %q", dtype) + } + return &shake256Digester{hash: sha3.NewShake256()}, nil +} + +func (d *shake256Digester) Digest(content io.Reader) (*Digest, error) { + d.hash.Reset() + if _, err := io.Copy(d.hash, content); err != nil { + return nil, err + } + digest := make([]byte, shake256Length) + if _, err := d.hash.Read(digest); err != nil { + // sha3.ShakeHash never errors or short reads. Something horribly wrong + // happened if your computer ended up here. + return nil, err + } + return NewDigestFromBytes(DigestTypeShake256, digest) } diff --git a/vendor/github.com/bufbuild/buf/private/pkg/manifest/manifest.go b/vendor/github.com/bufbuild/buf/private/pkg/manifest/manifest.go new file mode 100644 index 0000000000..f5725e0e64 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/manifest/manifest.go @@ -0,0 +1,195 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Manifests are a list of paths and their hash digests, canonically ordered by +// path in increasing lexicographical order. Manifests are encoded as: +// +// <digest type>:<digest>[SP][SP]<path>[LF] +// +// "shake256" is the only supported digest type. The digest is 64 bytes of hex +// encoded output of SHAKE256. See golang.org/x/crypto/sha3 and FIPS 202 for +// details on the SHAKE hash. +package manifest + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "sort" + "strings" +) + +var errNoFinalNewline = errors.New("partial record: missing newline") + +func newError(lineno int, msg string) error { + return fmt.Errorf("invalid manifest: %d: %s", lineno, msg) +} + +func newErrorWrapped(lineno int, err error) error { + return fmt.Errorf("invalid manifest: %d: %w", lineno, err) +} + +// Manifest represents a list of paths and their digests. +type Manifest struct { + pathToDigest map[string]Digest + digestToPaths map[string][]string +} + +var _ encoding.TextMarshaler = (*Manifest)(nil) +var _ encoding.TextUnmarshaler = (*Manifest)(nil) + +// New creates an empty manifest. +func New() *Manifest { + return &Manifest{ + pathToDigest: make(map[string]Digest), + digestToPaths: make(map[string][]string), + } +} + +// NewFromReader builds a manifest from an encoded manifest reader.
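A minimal sketch of producing the canonical encoding described above; the file name and content are invented, and the vendored import path is assumed to be usable as-is:

	package main

	import (
		"fmt"
		"strings"

		"github.com/bufbuild/buf/private/pkg/manifest"
	)

	func main() {
		m := manifest.New()
		digester, err := manifest.NewDigester(manifest.DigestTypeShake256)
		if err != nil {
			panic(err)
		}
		digest, err := digester.Digest(strings.NewReader(`syntax = "proto3";`))
		if err != nil {
			panic(err)
		}
		if err := m.AddEntry("foo.proto", *digest); err != nil {
			panic(err)
		}
		text, err := m.MarshalText()
		if err != nil {
			panic(err)
		}
		// Prints one record: shake256:<128 hex chars>  foo.proto
		fmt.Print(string(text))
	}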
+func NewFromReader(manifest io.Reader) (*Manifest, error) { + m := New() + scanner := bufio.NewScanner(manifest) + scanner.Split(splitManifest) + lineno := 0 + for scanner.Scan() { + lineno++ + encodedDigest, path, found := strings.Cut(scanner.Text(), "  ") + if !found { + return nil, newError(lineno, "invalid entry") + } + digest, err := NewDigestFromString(encodedDigest) + if err != nil { + return nil, newErrorWrapped(lineno, err) + } + if err := m.AddEntry(path, *digest); err != nil { + return nil, newErrorWrapped(lineno, err) + } + } + if err := scanner.Err(); err != nil { + if err == errNoFinalNewline { + return nil, newError(lineno, "partial record") + } + return nil, err + } + return m, nil +} + +// AddEntry adds an entry to the manifest with a path and its digest. It fails +// if the path already exists in the manifest with a different digest. +func (m *Manifest) AddEntry(path string, digest Digest) error { + if path == "" { + return errors.New("empty path") + } + if digest.Type() == "" || digest.Hex() == "" { + return errors.New("invalid digest") + } + if existingDigest, exists := m.pathToDigest[path]; exists { + if existingDigest.Equal(digest) { + return nil // same entry already in the manifest, nothing to do + } + return fmt.Errorf( + "cannot add digest %q for path %q (already associated to digest %q)", + digest.String(), path, existingDigest.String(), + ) + } + m.pathToDigest[path] = digest + key := digest.String() + m.digestToPaths[key] = append(m.digestToPaths[key], path) + return nil +} + +// Paths returns all paths in the manifest. +func (m *Manifest) Paths() []string { + paths := make([]string, 0, len(m.pathToDigest)) + for path := range m.pathToDigest { + paths = append(paths, path) + } + return paths +} + +// PathsFor returns one or more matching paths for a given digest. The digest is +// expected to be a lower-case hex encoded value. Returned paths are unordered. +// Paths is nil and ok is false if no paths are found. +func (m *Manifest) PathsFor(digest string) ([]string, bool) { + paths, ok := m.digestToPaths[digest] + if !ok || len(paths) == 0 { + return nil, false + } + return paths, true +} + +// DigestFor returns the matching digest for the given path. The path must be an +// exact match. Digest is nil and ok is false if no digest is found. +func (m *Manifest) DigestFor(path string) (*Digest, bool) { + digest, ok := m.pathToDigest[path] + if !ok { + return nil, false + } + return &digest, true +} + +// MarshalText encodes the manifest into its canonical form. +func (m *Manifest) MarshalText() ([]byte, error) { + var coded bytes.Buffer + paths := m.Paths() + sort.Strings(paths) + for _, path := range paths { + digest := m.pathToDigest[path] + if _, err := fmt.Fprintf(&coded, "%s  %s\n", &digest, path); err != nil { + return nil, err + } + } + return coded.Bytes(), nil +} + +// UnmarshalText decodes a manifest from text. +// +// Use NewFromReader if you have an io.Reader and want to avoid memory +// copying. +func (m *Manifest) UnmarshalText(text []byte) error { + newm, err := NewFromReader(bytes.NewReader(text)) + if err != nil { + return err + } + m.pathToDigest = newm.pathToDigest + m.digestToPaths = newm.digestToPaths + return nil +} + +// Blob returns the manifest's blob.
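The decode direction is the mirror image; in this sketch, encoded is assumed to hold text in the canonical format:

	m, err := manifest.NewFromReader(strings.NewReader(encoded))
	if err != nil {
		// handle the parse error
	}
	if digest, ok := m.DigestFor("foo.proto"); ok {
		// Lookups are keyed by the full "<type>:<hex>" string.
		paths, _ := m.PathsFor(digest.String())
		_ = paths // every path sharing this digest, unordered
	}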
+func (m *Manifest) Blob() (Blob, error) { + manifestText, err := m.MarshalText() + if err != nil { + return nil, err + } + return NewMemoryBlobFromReader(bytes.NewReader(manifestText)) +} + +func splitManifest(data []byte, atEOF bool) (int, []byte, error) { + // Return a line without LF. + if i := bytes.IndexByte(data, '\n'); i >= 0 { + return i + 1, data[0:i], nil + } + + // EOF occurred with a partial line. + if atEOF && len(data) != 0 { + return 0, nil, errNoFinalNewline + } + + return 0, nil, nil +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/manifest/module.go b/vendor/github.com/bufbuild/buf/private/pkg/manifest/module.go new file mode 100644 index 0000000000..58ab2e5dc3 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/manifest/module.go @@ -0,0 +1,251 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manifest + +import ( + "bytes" + "context" + "fmt" + "io" + + "go.uber.org/multierr" +) + +// Blob is a blob with a digest and a content. +type Blob interface { + Digest() *Digest + Open(context.Context) (io.ReadCloser, error) +} + +type memoryBlob struct { + digest Digest + content []byte +} + +var _ Blob = (*memoryBlob)(nil) + +type memoryBlobOptions struct { + validateDigest bool +} + +// MemoryBlobOption are options passed when creating a new memory blob. +type MemoryBlobOption func(*memoryBlobOptions) + +// MemoryBlobWithDigestValidation checks that the passed content and digest match. +func MemoryBlobWithDigestValidation() MemoryBlobOption { + return func(opts *memoryBlobOptions) { + opts.validateDigest = true + } +} + +// NewMemoryBlob takes a digest and a content, and turns it into an in-memory +// representation of a blob, which returns the digest and an io.ReadCloser for +// its content. +func NewMemoryBlob(digest Digest, content []byte, opts ...MemoryBlobOption) (Blob, error) { + var config memoryBlobOptions + for _, option := range opts { + option(&config) + } + if config.validateDigest { + digester, err := NewDigester(digest.Type()) + if err != nil { + return nil, err + } + contentDigest, err := digester.Digest(bytes.NewReader(content)) + if err != nil { + return nil, err + } + if !digest.Equal(*contentDigest) { + return nil, fmt.Errorf("digest and content mismatch") + } + } + return &memoryBlob{ + digest: digest, + content: content, + }, nil +} + +func (b *memoryBlob) Digest() *Digest { + return &b.digest +} + +func (b *memoryBlob) Open(context.Context) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(b.content)), nil +} + +// BlobSet represents a set of deduplicated blobs, by digests. +type BlobSet struct { + digestToBlob map[string]Blob +} + +type blobSetOptions struct { + validateContent bool +} + +// BlobSetOption are options passed when creating a new blob set. +type BlobSetOption func(*blobSetOptions) + +// BlobSetWithContentValidation turns on content validation for all the blobs +// when creating a new BlobSet. 
If this option is on, blobs with the same digest +// must have the same content (in case blobs with the same digest are sent). If +// this option is not passed, then the latest duplicated blob digest content +// will prevail in the set. +func BlobSetWithContentValidation() BlobSetOption { + return func(opts *blobSetOptions) { + opts.validateContent = true + } +} + +// NewBlobSet receives a slice of blobs, and de-duplicates them into a BlobSet. +func NewBlobSet(ctx context.Context, blobs []Blob, opts ...BlobSetOption) (*BlobSet, error) { + var config blobSetOptions + for _, option := range opts { + option(&config) + } + digestToBlobs := make(map[string]Blob, len(blobs)) + for _, b := range blobs { + digestStr := b.Digest().String() + if config.validateContent { + existingBlob, alreadyPresent := digestToBlobs[digestStr] + if alreadyPresent { + equalContent, err := BlobEqual(ctx, b, existingBlob) + if err != nil { + return nil, fmt.Errorf("compare duplicated blobs with digest %q: %w", digestStr, err) + } + if !equalContent { + return nil, fmt.Errorf("duplicated blobs with digest %q have different contents", digestStr) + } + } + } + digestToBlobs[digestStr] = b + } + return &BlobSet{digestToBlob: digestToBlobs}, nil +} + +// BlobFor returns the blob for the passed digest string, or nil, ok=false if +// the digest has no blob in the set. +func (s *BlobSet) BlobFor(digest string) (Blob, bool) { + blob, ok := s.digestToBlob[digest] + if !ok { + return nil, false + } + return blob, true +} + +// Blobs returns a slice of the blobs in the set. +func (s *BlobSet) Blobs() []Blob { + blobs := make([]Blob, 0, len(s.digestToBlob)) + for _, b := range s.digestToBlob { + blobs = append(blobs, b) + } + return blobs +} + +// NewMemoryBlobFromReader creates a memory blob from content, which is read +// until completion. The returned blob contains all bytes read. If you are using +// this in a loop, you might better use NewMemoryBlobFromReaderWithDigester so +// you can reuse your digester. +func NewMemoryBlobFromReader(content io.Reader) (Blob, error) { + digester, err := NewDigester(DigestTypeShake256) + if err != nil { + return nil, err + } + return NewMemoryBlobFromReaderWithDigester(content, digester) +} + +// NewMemoryBlobFromReaderWithDigester creates a memory blob from content with +// the passed digester. The content is read until completion. The returned blob +// contains all bytes read. +func NewMemoryBlobFromReaderWithDigester(content io.Reader, digester Digester) (Blob, error) { + var contentInMemory bytes.Buffer + tee := io.TeeReader(content, &contentInMemory) + digest, err := digester.Digest(tee) + if err != nil { + return nil, err + } + return &memoryBlob{ + digest: *digest, + content: contentInMemory.Bytes(), + }, nil +} + +// BlobEqual returns true if blob a is the same as blob b. The digest is +// checked for equality and the content bytes compared. +// +// An error is returned if an unexpected I/O error occurred when opening, +// reading, or closing either blob. +func BlobEqual(ctx context.Context, a, b Blob) (_ bool, retErr error) { + const blockSize = 4096 + if !a.Digest().Equal(*b.Digest()) { + // digests don't match + return false, nil + } + aFile, err := a.Open(ctx) + if err != nil { + return false, err + } + defer func() { retErr = multierr.Append(retErr, aFile.Close()) }() + bFile, err := b.Open(ctx) + if err != nil { + return false, err + } + defer func() { retErr = multierr.Append(retErr, bFile.Close()) }() + // Read blockSize from a, then from b, and compare. 
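+	// A single Read on a may legally return fewer than blockSize bytes, so the
+	// io.ReadAtLeast call below makes b produce exactly aN bytes before the
+	// slices are compared, keeping the two readers in lockstep; a short b
+	// typically surfaces as io.ErrUnexpectedEOF and is handled first.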
+ aBlock := make([]byte, blockSize) + bBlock := make([]byte, blockSize) + for { + aN, aErr := aFile.Read(aBlock) + bN, bErr := io.ReadAtLeast(bFile, bBlock[:aN], aN) // exactly aN bytes + // We're running unexpected error processing (not EOF) before comparing + // bytes because it doesn't matter if the returned bytes match if an + // error occurred before an expected EOF. + if bErr == io.ErrUnexpectedEOF { + // b is shorter; we can return early + return false, nil + } + if aErr != nil && aErr != io.EOF { + // unexpected read error + return false, aErr + } + if bErr != nil && bErr != io.EOF { + // unexpected read error + return false, bErr + } + if !bytes.Equal(aBlock[:aN], bBlock[:bN]) { + // Read content doesn't match. + return false, nil + } + if aErr == io.EOF || bErr == io.EOF { + // EOF + break + } + } + aN, aErr := aFile.Read(aBlock[:1]) + bN, bErr := bFile.Read(bBlock[:1]) + if aN == 0 && bN == 0 && aErr == io.EOF && bErr == io.EOF { + // a and b are at EOF with no more data for us + return true, nil + } + // either a or b is longer + return false, multierr.Append(nilEOF(aErr), nilEOF(bErr)) +} + +// nilEOF maps io.EOF to nil +func nilEOF(err error) error { + if err == io.EOF { + return nil + } + return err +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/manifest/storage.go b/vendor/github.com/bufbuild/buf/private/pkg/manifest/storage.go new file mode 100644 index 0000000000..2c6f77ebf0 --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/manifest/storage.go @@ -0,0 +1,195 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package manifest + +import ( + "context" + "fmt" + "io" + + "github.com/bufbuild/buf/private/pkg/normalpath" + "github.com/bufbuild/buf/private/pkg/storage" + "github.com/bufbuild/buf/private/pkg/storage/storageutil" + "go.uber.org/multierr" +) + +// manifestBucket is a storage.ReadBucket implementation from a manifest and an +// array of blobs. +type manifestBucket struct { + manifest Manifest + blobs BlobSet +} + +type manifestBucketObject struct { + path string + file io.ReadCloser +} + +func (o *manifestBucketObject) Path() string { return o.path } +func (o *manifestBucketObject) ExternalPath() string { return o.path } +func (o *manifestBucketObject) Read(p []byte) (int, error) { return o.file.Read(p) } +func (o *manifestBucketObject) Close() error { return o.file.Close() } + +// NewFromBucket creates a manifest and all its files' blobs from a storage +// bucket, with all its digests in DigestTypeShake256.
+func NewFromBucket( + ctx context.Context, + bucket storage.ReadBucket, +) (*Manifest, *BlobSet, error) { + m := New() + digester, err := NewDigester(DigestTypeShake256) + if err != nil { + return nil, nil, err + } + var blobs []Blob + if walkErr := bucket.Walk(ctx, "", func(info storage.ObjectInfo) (retErr error) { + path := info.Path() + obj, err := bucket.Get(ctx, path) + if err != nil { + return err + } + defer func() { retErr = multierr.Append(retErr, obj.Close()) }() + blob, err := NewMemoryBlobFromReaderWithDigester(obj, digester) + if err != nil { + return err + } + blobs = append(blobs, blob) + return m.AddEntry(path, *blob.Digest()) + }); walkErr != nil { + return nil, nil, walkErr + } + blobSet, err := NewBlobSet(ctx, blobs) // no need to pass validation options, we're building and digesting the blobs + if err != nil { + return nil, nil, err + } + return m, blobSet, nil +} + +type bucketOptions struct { + allManifestBlobs bool + noExtraBlobs bool +} + +// BucketOption are options passed when creating a new manifest bucket. +type BucketOption func(*bucketOptions) + +// BucketWithAllManifestBlobsValidation validates that all manifest digests +// have a corresponding blob in the blob set. If this option is not passed, then +// buckets with partial/incomplete blobs are allowed. +func BucketWithAllManifestBlobsValidation() BucketOption { + return func(opts *bucketOptions) { + opts.allManifestBlobs = true + } +} + +// BucketWithNoExtraBlobsValidation validates that the passed blob set has no +// additional blobs beyond the ones in the manifest. +func BucketWithNoExtraBlobsValidation() BucketOption { + return func(opts *bucketOptions) { + opts.noExtraBlobs = true + } +} + +// NewBucket takes a manifest and a blob set and builds a readable storage +// bucket that contains the files in the manifest. +func NewBucket(m Manifest, blobs BlobSet, opts ...BucketOption) (storage.ReadBucket, error) { + var config bucketOptions + for _, option := range opts { + option(&config) + } + if config.allManifestBlobs { + for _, path := range m.Paths() { + pathDigest, ok := m.DigestFor(path) + if !ok { + // we're iterating manifest paths, this should never happen. + return nil, fmt.Errorf("path %q not present in manifest", path) + } + if _, ok := blobs.BlobFor(pathDigest.String()); !ok { + return nil, fmt.Errorf("manifest path %q with digest %q has no associated blob", path, pathDigest.String()) + } + } + } + if config.noExtraBlobs { + for digestStr := range blobs.digestToBlob { + if _, ok := m.PathsFor(digestStr); !ok { + return nil, fmt.Errorf("blob with digest %q is not present in the manifest", digestStr) + } + } + } + return &manifestBucket{ + manifest: m, + blobs: blobs, + }, nil +} + +// blobFor returns a blob for a given path. It returns the blob if found, or nil +// and ok=false if the path has no digest in the manifest, or if the blob for +// that digest is not present. 
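A sketch of the round trip these helpers enable (again internal to the buf module; roundTrip is an invented name): derive a manifest and blob set from a source bucket, then rebuild a validated, read-only view of the same files.

package manifestexample // hypothetical

import (
    "context"

    "github.com/bufbuild/buf/private/pkg/manifest"
    "github.com/bufbuild/buf/private/pkg/storage"
)

// roundTrip re-exposes sourceBucket through a manifest, failing if any
// manifest entry lacks a blob or any blob is unreferenced.
func roundTrip(ctx context.Context, sourceBucket storage.ReadBucket) (storage.ReadBucket, error) {
    m, blobSet, err := manifest.NewFromBucket(ctx, sourceBucket)
    if err != nil {
        return nil, err
    }
    return manifest.NewBucket(
        *m,
        *blobSet,
        manifest.BucketWithAllManifestBlobsValidation(),
        manifest.BucketWithNoExtraBlobsValidation(),
    )
}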
+func (m *manifestBucket) blobFor(path string) (_ Blob, ok bool) { + digest, ok := m.manifest.DigestFor(path) + if !ok { + return nil, false + } + blob, ok := m.blobs.BlobFor(digest.String()) + if !ok { + return nil, false + } + return blob, true +} + +func (m *manifestBucket) Get(ctx context.Context, path string) (storage.ReadObjectCloser, error) { + blob, ok := m.blobFor(path) + if !ok { + return nil, storage.NewErrNotExist(path) + } + file, err := blob.Open(ctx) + if err != nil { + return nil, err + } + return &manifestBucketObject{ + path: path, + file: file, + }, nil +} + +func (m *manifestBucket) Stat(ctx context.Context, path string) (storage.ObjectInfo, error) { + if _, ok := m.blobFor(path); !ok { + return nil, storage.NewErrNotExist(path) + } + // storage.ObjectInfo only requires path + return &manifestBucketObject{path: path}, nil +} + +func (m *manifestBucket) Walk(ctx context.Context, prefix string, f func(storage.ObjectInfo) error) error { + prefix, err := storageutil.ValidatePrefix(prefix) + if err != nil { + return err + } + // walk order is not guaranteed + for _, path := range m.manifest.Paths() { + if !normalpath.EqualsOrContainsPath(prefix, path, normalpath.Relative) { + continue + } + if _, ok := m.blobFor(path); !ok { + // this could happen if the bucket was built with partial blobs + continue + } + // storage.ObjectInfo only requires path + if err := f(&manifestBucketObject{path: path}); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observability/usage.gen.go b/vendor/github.com/bufbuild/buf/private/pkg/manifest/usage.gen.go similarity index 96% rename from vendor/github.com/bufbuild/buf/private/pkg/observability/usage.gen.go rename to vendor/github.com/bufbuild/buf/private/pkg/manifest/usage.gen.go index bb913c1e8b..fe3b997004 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/observability/usage.gen.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/manifest/usage.gen.go @@ -14,6 +14,6 @@ // Generated. DO NOT EDIT. -package observability +package manifest import _ "github.com/bufbuild/buf/private/usage" diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observability/observability.go b/vendor/github.com/bufbuild/buf/private/pkg/observability/observability.go deleted file mode 100644 index 1d85ada6f0..0000000000 --- a/vendor/github.com/bufbuild/buf/private/pkg/observability/observability.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2020-2023 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package observability - -import ( - "io" - "net/http" - - "github.com/bufbuild/buf/private/pkg/ioextended" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - otelmetric "go.opentelemetry.io/otel/metric" - oteltrace "go.opentelemetry.io/otel/trace" -) - -// TraceExportCloser describes the interface used to export OpenCensus traces -// and cleaning of resources. 
-type TraceExportCloser interface { - trace.Exporter - io.Closer -} - -// ViewExportCloser describes the interface used to export OpenCensus views -// and cleaning of resources. -type ViewExportCloser interface { - view.Exporter - io.Closer -} - -// TraceViewExportCloser implements both OpenCensus view and trace exporting. -type TraceViewExportCloser interface { - ViewExportCloser - TraceExportCloser -} - -type TracerProviderCloser interface { - oteltrace.TracerProvider - io.Closer -} - -type MeterProviderCloser interface { - otelmetric.MeterProvider - io.Closer -} - -// Start initializes tracing. -// -// Tracing is a global function due to how go.opencensus.io is written. -// The returned io.Closer needs to be called at the completion of the program. -func Start(options ...StartOption) io.Closer { - startOptions := newStartOptions() - for _, option := range options { - option(startOptions) - } - if len(startOptions.traceExportClosers) == 0 && len(startOptions.traceViewExportClosers) == 0 { - return ioextended.NopCloser - } - trace.ApplyConfig( - trace.Config{ - DefaultSampler: trace.AlwaysSample(), - }, - ) - var closers []io.Closer - for _, traceExportCloser := range startOptions.traceExportClosers { - traceExportCloser := traceExportCloser - trace.RegisterExporter(traceExportCloser) - closers = append(closers, traceExportCloser) - } - for _, viewExportCloser := range startOptions.viewExportClosers { - viewExportCloser := viewExportCloser - view.RegisterExporter(viewExportCloser) - closers = append(closers, viewExportCloser) - } - for _, traceViewExportCloser := range startOptions.traceViewExportClosers { - traceViewExportCloser := traceViewExportCloser - trace.RegisterExporter(traceViewExportCloser) - view.RegisterExporter(traceViewExportCloser) - closers = append(closers, traceViewExportCloser) - } - for _, tracerProviderCloser := range startOptions.tracerProviderClosers { - tracerProviderCloser := tracerProviderCloser - closers = append(closers, tracerProviderCloser) - } - for _, meterProviderCloser := range startOptions.meterProviderClosers { - meterProviderCloser := meterProviderCloser - closers = append(closers, meterProviderCloser) - } - return ioextended.ChainCloser(closers...) -} - -// StartOption is an option for start. -type StartOption func(*startOptions) - -// StartWithTraceExportCloser returns a new StartOption that adds the given TraceExportCloser. -func StartWithTraceExportCloser(traceExportCloser TraceExportCloser) StartOption { - return func(startOptions *startOptions) { - startOptions.traceExportClosers = append(startOptions.traceExportClosers, traceExportCloser) - } -} - -// StartWithViewExportCloser returns a new StartOption that adds the given TraceExportCloser. -func StartWithViewExportCloser(viewExportCloser ViewExportCloser) StartOption { - return func(startOptions *startOptions) { - startOptions.viewExportClosers = append(startOptions.viewExportClosers, viewExportCloser) - } -} - -// StartWithTraceViewExportCloser returns a new StartOption that adds the given TraceViewExportCloser. 
-func StartWithTraceViewExportCloser(traceViewExportCloser TraceViewExportCloser) StartOption { - return func(startOptions *startOptions) { - startOptions.traceViewExportClosers = append(startOptions.traceViewExportClosers, traceViewExportCloser) - } -} - -func StartWithTracerProviderCloser(tracerProviderCloser TracerProviderCloser) StartOption { - return func(startOptions *startOptions) { - startOptions.tracerProviderClosers = append(startOptions.tracerProviderClosers, tracerProviderCloser) - } -} - -func StartWithMeterProviderCloser(meterProviderCloser MeterProviderCloser) StartOption { - return func(startOptions *startOptions) { - startOptions.meterProviderClosers = append(startOptions.meterProviderClosers, meterProviderCloser) - } -} - -// NewHTTPTransport returns a HTTP transport instrumented with OpenCensus traces and metrics. -func NewHTTPTransport(base http.RoundTripper, tags ...tag.Mutator) http.RoundTripper { - return &wrappedRoundTripper{ - Base: &ochttp.Transport{ - NewClientTrace: ochttp.NewSpanAnnotatingClientTrace, - Base: base, - }, - Tags: tags, - } -} - -// NewHTTPClient returns a HTTP client instrumented with OpenCensus traces and metrics. -func NewHTTPClient() *http.Client { - return &http.Client{ - Transport: NewHTTPTransport(http.DefaultTransport), - } -} - -type startOptions struct { - traceExportClosers []TraceExportCloser - viewExportClosers []ViewExportCloser - traceViewExportClosers []TraceViewExportCloser - tracerProviderClosers []TracerProviderCloser - meterProviderClosers []MeterProviderCloser -} - -func newStartOptions() *startOptions { - return &startOptions{} -} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/trace_export_closer.go b/vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/trace_export_closer.go deleted file mode 100644 index 3e7482648a..0000000000 --- a/vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/trace_export_closer.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2020-2023 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package observabilityzap - -import ( - "go.opencensus.io/trace" - "go.uber.org/zap" -) - -type traceExportCloser struct { - logger *zap.Logger -} - -func newTraceExportCloser(logger *zap.Logger) *traceExportCloser { - return &traceExportCloser{ - logger: logger, - } -} - -// ExportSpan implements the opencensus trace.Exporter interface. -func (t *traceExportCloser) ExportSpan(spanData *trace.SpanData) { - if spanData == nil || !spanData.IsSampled() { - return - } - if checkedEntry := t.logger.Check(zap.DebugLevel, spanData.Name); checkedEntry != nil { - fields := []zap.Field{ - zap.Duration("duration", spanData.EndTime.Sub(spanData.StartTime)), - } - for key, value := range spanData.Attributes { - fields = append(fields, zap.Any(key, value)) - } - checkedEntry.Write(fields...) 
- }
-}
-
-func (t *traceExportCloser) Close() error {
- return nil
-}
diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/observabilityzap.go b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/observabilityzap.go
new file mode 100644
index 0000000000..fa746b33b5
--- /dev/null
+++ b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/observabilityzap.go
@@ -0,0 +1,36 @@
+// Copyright 2020-2023 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package observabilityzap
+
+import (
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/trace"
+ "go.uber.org/zap"
+)
+
+// Start creates a Zap logging exporter for OpenTelemetry traces, installs it
+// behind a global TracerProvider, and returns that provider. The returned
+// provider implements io.Closer for clean-up.
+func Start(logger *zap.Logger) *tracerProviderCloser {
+ exporter := newZapExporter(logger)
+ tracerProviderOptions := []trace.TracerProviderOption{
+ trace.WithSampler(trace.AlwaysSample()),
+ trace.WithSpanProcessor(trace.NewBatchSpanProcessor(exporter)),
+ }
+ tracerProvider := newTracerProviderCloser(trace.NewTracerProvider(tracerProviderOptions...))
+ otel.SetTracerProvider(tracerProvider)
+ otel.SetTextMapPropagator(propagation.TraceContext{})
+ return tracerProvider
+}
diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/tracer_provider_closer.go b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/tracer_provider_closer.go
new file mode 100644
index 0000000000..d5045cb7e5
--- /dev/null
+++ b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/tracer_provider_closer.go
@@ -0,0 +1,44 @@
+// Copyright 2020-2023 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package observabilityzap
+
+import (
+ "context"
+ "io"
+
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+)
+
+var _ trace.TracerProvider = &tracerProviderCloser{}
+var _ io.Closer = &tracerProviderCloser{}
+
+type tracerProviderCloser struct {
+ tracerProvider *sdktrace.TracerProvider
+}
+
+func newTracerProviderCloser(tracerProvider *sdktrace.TracerProvider) *tracerProviderCloser {
+ return &tracerProviderCloser{
+ tracerProvider: tracerProvider,
+ }
+}
+
+func (t *tracerProviderCloser) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ return t.tracerProvider.Tracer(name, opts...)
+} + +func (t *tracerProviderCloser) Close() error { + return t.tracerProvider.Shutdown(context.Background()) +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/usage.gen.go b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/usage.gen.go similarity index 100% rename from vendor/github.com/bufbuild/buf/private/pkg/observability/observabilityzap/usage.gen.go rename to vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/usage.gen.go diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/zapexporter.go b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/zapexporter.go new file mode 100644 index 0000000000..25c3f7438d --- /dev/null +++ b/vendor/github.com/bufbuild/buf/private/pkg/observabilityzap/zapexporter.go @@ -0,0 +1,56 @@ +// Copyright 2020-2023 Buf Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package observabilityzap + +import ( + "context" + + "go.opentelemetry.io/otel/sdk/trace" + "go.uber.org/zap" +) + +var _ trace.SpanExporter = &zapExporter{} + +type zapExporter struct { + logger *zap.Logger +} + +func newZapExporter(logger *zap.Logger) *zapExporter { + return &zapExporter{ + logger: logger, + } +} + +func (z *zapExporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { + for _, span := range spans { + if !span.SpanContext().IsSampled() { + continue + } + if checkedEntry := z.logger.Check(zap.DebugLevel, span.Name()); checkedEntry != nil { + fields := []zap.Field{ + zap.Duration("duration", span.EndTime().Sub(span.StartTime())), + } + for _, attribute := range span.Attributes() { + fields = append(fields, zap.Any(string(attribute.Key), attribute.Value)) + } + checkedEntry.Write(fields...) + } + } + return nil +} + +func (z *zapExporter) Shutdown(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/protosource/protosource.go b/vendor/github.com/bufbuild/buf/private/pkg/protosource/protosource.go index cdff856706..d747204e33 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/protosource/protosource.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/protosource/protosource.go @@ -1085,6 +1085,90 @@ func FreeMessageRanges(message Message) []MessageRange { return unused } +// CheckTagRangeIsSubset checks if supersetRanges is a superset of subsetRanges. +// If so, it returns true and nil. If not, it returns false with a slice of failing ranges from subsetRanges. 
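Before the protosource changes that follow, a sketch of how the new observabilityzap entry point replaces the deleted OpenCensus Start: callers hand in a zap logger and close the returned provider on shutdown. The package is internal to buf, so this is only compilable inside that module:

package main

import (
    "context"

    "github.com/bufbuild/buf/private/pkg/observabilityzap"
    "go.opentelemetry.io/otel"
    "go.uber.org/zap"
)

func main() {
    logger, err := zap.NewDevelopment()
    if err != nil {
        panic(err)
    }
    // Start registers a global TracerProvider whose zap exporter logs
    // sampled spans at debug level.
    tracerProvider := observabilityzap.Start(logger)
    // Close calls Shutdown on the underlying provider, flushing the batch
    // span processor.
    defer tracerProvider.Close()

    _, span := otel.Tracer("example").Start(context.Background(), "do-work")
    span.End()
}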
+func CheckTagRangeIsSubset(supersetRanges []TagRange, subsetRanges []TagRange) (bool, []TagRange) { + if len(subsetRanges) == 0 { + return true, nil + } + + if len(supersetRanges) == 0 { + return false, subsetRanges + } + + supersetTagRangeGroups := groupAdjacentTagRanges(supersetRanges) + subsetTagRanges := sortTagRanges(subsetRanges) + missingTagRanges := []TagRange{} + + for i, j := 0, 0; j < len(subsetTagRanges); j++ { + for supersetTagRangeGroups[i].end < subsetTagRanges[j].Start() { + if i++; i == len(supersetTagRangeGroups) { + missingTagRanges = append(missingTagRanges, subsetTagRanges[j:]...) + return false, missingTagRanges + } + } + if supersetTagRangeGroups[i].start > subsetTagRanges[j].Start() || + supersetTagRangeGroups[i].end < subsetTagRanges[j].End() { + missingTagRanges = append(missingTagRanges, subsetTagRanges[j]) + } + } + + if len(missingTagRanges) != 0 { + return false, missingTagRanges + } + + return true, nil +} + +// groupAdjacentTagRanges sorts and groups adjacent tag ranges. +func groupAdjacentTagRanges(ranges []TagRange) []tagRangeGroup { + if len(ranges) == 0 { + return []tagRangeGroup{} + } + + sortedTagRanges := sortTagRanges(ranges) + + j := 0 + groupedTagRanges := make([]tagRangeGroup, 1, len(ranges)) + groupedTagRanges[j] = tagRangeGroup{ + ranges: sortedTagRanges[0:1], + start: sortedTagRanges[0].Start(), + end: sortedTagRanges[0].End(), + } + + for i := 1; i < len(sortedTagRanges); i++ { + if sortedTagRanges[i].Start() <= sortedTagRanges[i-1].End()+1 { + if sortedTagRanges[i].End() > groupedTagRanges[j].end { + groupedTagRanges[j].end = sortedTagRanges[i].End() + } + groupedTagRanges[j].ranges = groupedTagRanges[j].ranges[0 : len(groupedTagRanges[j].ranges)+1] + } else { + groupedTagRanges = append(groupedTagRanges, tagRangeGroup{ + ranges: sortedTagRanges[i : i+1], + start: sortedTagRanges[i].Start(), + end: sortedTagRanges[i].End(), + }) + j++ + } + } + + return groupedTagRanges +} + +// sortTagRanges sorts tag ranges by their start, end components. +func sortTagRanges(ranges []TagRange) []TagRange { + rangesCopy := make([]TagRange, len(ranges)) + copy(rangesCopy, ranges) + + sort.Slice(rangesCopy, func(i, j int) bool { + return rangesCopy[i].Start() < rangesCopy[j].Start() || + (rangesCopy[i].Start() == rangesCopy[j].Start() && + rangesCopy[i].End() < rangesCopy[j].End()) + }) + + return rangesCopy +} + func freeMessageRangeStringSuffix(freeRange MessageRange) string { start := freeRange.Start() end := freeRange.End() @@ -1132,3 +1216,9 @@ func mapToSortedFiles(keyToFileMap map[string]map[string]File) map[string][]File } return keyToSortedFiles } + +type tagRangeGroup struct { + ranges []TagRange + start int + end int +} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/bucket.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/bucket.go index 3a111d8036..96c8868d52 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/storage/bucket.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/bucket.go @@ -52,6 +52,27 @@ type ReadBucket interface { Walk(ctx context.Context, prefix string, f func(ObjectInfo) error) error } +// PutOptions are the possible options that can be passed to a Put operation. +type PutOptions struct { + CustomChunkSize bool + ChunkSize int64 // measured in bytes +} + +// PutOption are options passed when putting an object in a bucket. +type PutOption func(*PutOptions) + +// PutWithChunkSize sets the passed size in bytes to `ChunkSize` and +// `CustomChunkSize` to true. 
Some implementations of `storage.WriteBucket.Put`
+// allow multi-part upload, and allow customizing the chunk size of each part
+// upload, or even disabling multi-part upload. This is a suggested chunk size;
+// implementations may choose to ignore this option.
+func PutWithChunkSize(sizeInBytes int64) PutOption {
+ return func(opts *PutOptions) {
+ opts.CustomChunkSize = true
+ opts.ChunkSize = sizeInBytes
+ }
+}
+
 // WriteBucket is a write-only bucket.
 type WriteBucket interface {
 // Put returns a WriteObjectCloser to write to the path.
@@ -61,7 +82,7 @@ type WriteBucket interface {
 // The returned WriteObjectCloser is not thread-safe.
 //
 // Returns error on system error.
- Put(ctx context.Context, path string) (WriteObjectCloser, error)
+ Put(ctx context.Context, path string, opts ...PutOption) (WriteObjectCloser, error)
 // Delete deletes the object at the path.
 //
 // Returns ErrNotExist if the path does not exist, other error
diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/diff.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/diff.go
index 6c525cbc1c..a70df50886 100644
--- a/vendor/github.com/bufbuild/buf/private/pkg/storage/diff.go
+++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/diff.go
@@ -86,6 +86,9 @@ func DiffWithExternalPathPrefixes(
 // content: the file content.
 //
 // transform returns a string that is the transformed content of filename.
+//
+// TODO: this needs to be refactored or removed, especially the implicit side enum.
+// Perhaps provide a transform function for a given bucket and apply it there.
 func DiffWithTransform(
 transform func(side string, filename string, content []byte) []byte,
) DiffOption {
diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/limit.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/limit.go
index f5a5d92216..9de2e9f2f7 100644
--- a/vendor/github.com/bufbuild/buf/private/pkg/storage/limit.go
+++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/limit.go
@@ -47,8 +47,8 @@ func newLimitedWriteBucket(bucket WriteBucket, limit int64) *limitedWriteBucket
 }
}
-func (w *limitedWriteBucket) Put(ctx context.Context, path string) (WriteObjectCloser, error) {
- writeObjectCloser, err := w.WriteBucket.Put(ctx, path)
+func (w *limitedWriteBucket) Put(ctx context.Context, path string, opts ...PutOption) (WriteObjectCloser, error) {
+ writeObjectCloser, err := w.WriteBucket.Put(ctx, path, opts...)
 if err != nil {
 return nil, err
 }
diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/map.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/map.go
index 38b5cc726e..f924a84143 100644
--- a/vendor/github.com/bufbuild/buf/private/pkg/storage/map.go
+++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/map.go
@@ -171,12 +171,12 @@ func newMapWriteBucket(
 }
}
-func (w *mapWriteBucket) Put(ctx context.Context, path string) (WriteObjectCloser, error) {
+func (w *mapWriteBucket) Put(ctx context.Context, path string, opts ...PutOption) (WriteObjectCloser, error) {
 fullPath, err := w.getFullPath(path)
 if err != nil {
 return nil, err
 }
- writeObjectCloser, err := w.delegate.Put(ctx, fullPath)
+ writeObjectCloser, err := w.delegate.Put(ctx, fullPath, opts...)
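The new variadic options thread through every WriteBucket decorator, as in map.go above. A hypothetical caller hinting a 5 MiB part size to an implementation that supports multi-part upload could look like the sketch below; the option is advisory, and the in-memory and OS buckets further down simply ignore it:

package storageexample // hypothetical

import (
    "context"

    "github.com/bufbuild/buf/private/pkg/storage"
    "go.uber.org/multierr"
)

func writeLargeObject(ctx context.Context, bucket storage.WriteBucket, data []byte) (retErr error) {
    const chunkSize = 5 << 20 // a suggested 5 MiB part size, not a guarantee
    writer, err := bucket.Put(ctx, "large/object.bin", storage.PutWithChunkSize(chunkSize))
    if err != nil {
        return err
    }
    defer func() { retErr = multierr.Append(retErr, writer.Close()) }()
    _, err = writer.Write(data)
    return err
}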
// TODO: if this is a path error, we should replace the path if err != nil { return nil, err diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/storagearchive/storagearchive.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/storagearchive/storagearchive.go index 90cbe0dc14..3f78a2b1d5 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/storage/storagearchive/storagearchive.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/storagearchive/storagearchive.go @@ -109,9 +109,6 @@ func Untar( if tarHeader.Size < 0 { return fmt.Errorf("invalid size for tar file %s: %d", tarHeader.Name, tarHeader.Size) } - if tarHeader.Size > options.maxFileSize { - return fmt.Errorf("%w %s:%d", ErrFileSizeLimit, tarHeader.Name, tarHeader.Size) - } path, ok, err := unmapArchivePath(tarHeader.Name, mapper, stripComponentCount) if err != nil { return err @@ -119,6 +116,9 @@ func Untar( if !ok || !tarHeader.FileInfo().Mode().IsRegular() { continue } + if tarHeader.Size > options.maxFileSize { + return fmt.Errorf("%w %s:%d", ErrFileSizeLimit, tarHeader.Name, tarHeader.Size) + } if err := storage.CopyReader(ctx, writeBucket, tarReader, path); err != nil { return err } diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/storagemem/bucket.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/storagemem/bucket.go index db17abc870..53c37bc267 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/storage/storagemem/bucket.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/storagemem/bucket.go @@ -87,7 +87,7 @@ func (b *bucket) Walk(ctx context.Context, prefix string, f func(storage.ObjectI return nil } -func (b *bucket) Put(ctx context.Context, path string) (storage.WriteObjectCloser, error) { +func (b *bucket) Put(ctx context.Context, path string, _ ...storage.PutOption) (storage.WriteObjectCloser, error) { // No need to lock as we do no modifications until close path, err := storageutil.ValidatePath(path) if err != nil { diff --git a/vendor/github.com/bufbuild/buf/private/pkg/storage/storageos/bucket.go b/vendor/github.com/bufbuild/buf/private/pkg/storage/storageos/bucket.go index f92f08da24..212040dbba 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/storage/storageos/bucket.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/storage/storageos/bucket.go @@ -161,7 +161,7 @@ func (b *bucket) Walk( return nil } -func (b *bucket) Put(ctx context.Context, path string) (storage.WriteObjectCloser, error) { +func (b *bucket) Put(ctx context.Context, path string, _ ...storage.PutOption) (storage.WriteObjectCloser, error) { externalPath, err := b.getExternalPath(path) if err != nil { return nil, err diff --git a/vendor/github.com/bufbuild/buf/private/pkg/tagrangeutil/tagrangeutil.go b/vendor/github.com/bufbuild/buf/private/pkg/tagrangeutil/tagrangeutil.go deleted file mode 100644 index 948e3afad7..0000000000 --- a/vendor/github.com/bufbuild/buf/private/pkg/tagrangeutil/tagrangeutil.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2020-2023 Buf Technologies, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package tagrangeutil - -import ( - "sort" - - "github.com/bufbuild/buf/private/pkg/protosource" -) - -type tagRangeGroup struct { - ranges []protosource.TagRange - start int - end int -} - -// sortTagRanges sorts tag ranges by their start, end components. -func sortTagRanges(ranges []protosource.TagRange) []protosource.TagRange { - rangesCopy := make([]protosource.TagRange, len(ranges)) - copy(rangesCopy, ranges) - - sort.Slice(rangesCopy, func(i, j int) bool { - return rangesCopy[i].Start() < rangesCopy[j].Start() || - (rangesCopy[i].Start() == rangesCopy[j].Start() && - rangesCopy[i].End() < rangesCopy[j].End()) - }) - - return rangesCopy -} - -// groupAdjacentTagRanges sorts and groups adjacent tag ranges. -func groupAdjacentTagRanges(ranges []protosource.TagRange) []tagRangeGroup { - if len(ranges) == 0 { - return []tagRangeGroup{} - } - - sortedTagRanges := sortTagRanges(ranges) - - j := 0 - groupedTagRanges := make([]tagRangeGroup, 1, len(ranges)) - groupedTagRanges[j] = tagRangeGroup{ - ranges: sortedTagRanges[0:1], - start: sortedTagRanges[0].Start(), - end: sortedTagRanges[0].End(), - } - - for i := 1; i < len(sortedTagRanges); i++ { - if sortedTagRanges[i].Start() <= sortedTagRanges[i-1].End()+1 { - if sortedTagRanges[i].End() > groupedTagRanges[j].end { - groupedTagRanges[j].end = sortedTagRanges[i].End() - } - groupedTagRanges[j].ranges = groupedTagRanges[j].ranges[0 : len(groupedTagRanges[j].ranges)+1] - } else { - groupedTagRanges = append(groupedTagRanges, tagRangeGroup{ - ranges: sortedTagRanges[i : i+1], - start: sortedTagRanges[i].Start(), - end: sortedTagRanges[i].End(), - }) - j++ - } - } - - return groupedTagRanges -} - -// CheckIsSubset checks if supersetRanges is a superset of subsetRanges. -// If so, it returns true and nil. If not, it returns false with a slice of failing ranges from subsetRanges. -func CheckIsSubset(supersetRanges []protosource.TagRange, subsetRanges []protosource.TagRange) (bool, []protosource.TagRange) { - if len(subsetRanges) == 0 { - return true, nil - } - - if len(supersetRanges) == 0 { - return false, subsetRanges - } - - supersetTagRangeGroups := groupAdjacentTagRanges(supersetRanges) - subsetTagRanges := sortTagRanges(subsetRanges) - missingTagRanges := []protosource.TagRange{} - - for i, j := 0, 0; j < len(subsetTagRanges); j++ { - for supersetTagRangeGroups[i].end < subsetTagRanges[j].Start() { - if i++; i == len(supersetTagRangeGroups) { - missingTagRanges = append(missingTagRanges, subsetTagRanges[j:]...) 
- return false, missingTagRanges - } - } - if supersetTagRangeGroups[i].start > subsetTagRanges[j].Start() || - supersetTagRangeGroups[i].end < subsetTagRanges[j].End() { - missingTagRanges = append(missingTagRanges, subsetTagRanges[j]) - } - } - - if len(missingTagRanges) != 0 { - return false, missingTagRanges - } - - return true, nil -} diff --git a/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/client.go b/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/client.go index f936622ba2..4c837747aa 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/client.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/client.go @@ -19,18 +19,14 @@ import ( "net" "net/http" - "github.com/bufbuild/buf/private/pkg/observability" - "go.opencensus.io/tag" "golang.org/x/net/http2" ) type clientOptions struct { - tlsConfig *tls.Config - observability bool - observabilityTags []tag.Mutator - proxy Proxy - interceptorFunc ClientInterceptorFunc - h2c bool + tlsConfig *tls.Config + proxy Proxy + interceptorFunc ClientInterceptorFunc + h2c bool } func newClient(options ...ClientOption) *http.Client { @@ -58,9 +54,6 @@ func newClient(options ...ClientOption) *http.Client { if opts.interceptorFunc != nil { roundTripper = opts.interceptorFunc(roundTripper) } - if opts.observability { - roundTripper = observability.NewHTTPTransport(roundTripper, opts.observabilityTags...) - } return &http.Client{ Transport: roundTripper, } diff --git a/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/httpclient.go b/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/httpclient.go index ef83f3d650..fb2765bc0f 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/httpclient.go +++ b/vendor/github.com/bufbuild/buf/private/pkg/transport/http/httpclient/httpclient.go @@ -18,8 +18,6 @@ import ( "crypto/tls" "net/http" "net/url" - - "go.opencensus.io/tag" ) // NewClient returns a new Client. @@ -42,17 +40,6 @@ func WithTLSConfig(tlsConfig *tls.Config) ClientOption { } } -// WithObservability returns a new ClientOption to use -// OpenCensus tracing and metrics. -// -// The default is to use no observability. -func WithObservability(tags ...tag.Mutator) ClientOption { - return func(opts *clientOptions) { - opts.observability = true - opts.observabilityTags = tags - } -} - // WithH2C returns a new ClientOption that allows dialing // h2c (cleartext) servers. func WithH2C() ClientOption { diff --git a/vendor/github.com/bufbuild/connect-go/Makefile b/vendor/github.com/bufbuild/connect-go/Makefile index ea1d4c6a8c..1d7421e568 100644 --- a/vendor/github.com/bufbuild/connect-go/Makefile +++ b/vendor/github.com/bufbuild/connect-go/Makefile @@ -7,7 +7,7 @@ MAKEFLAGS += --warn-undefined-variables MAKEFLAGS += --no-builtin-rules MAKEFLAGS += --no-print-directory BIN := .tmp/bin -COPYRIGHT_YEARS := 2021-2022 +COPYRIGHT_YEARS := 2021-2023 LICENSE_IGNORE := -e /testdata/ # Set to use a different compiler. For example, `GO=go1.18rc1 make test`. GO ?= go diff --git a/vendor/github.com/bufbuild/connect-go/buffer_pool.go b/vendor/github.com/bufbuild/connect-go/buffer_pool.go index 13f8a9e8a0..262bc7a472 100644 --- a/vendor/github.com/bufbuild/connect-go/buffer_pool.go +++ b/vendor/github.com/bufbuild/connect-go/buffer_pool.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/client.go b/vendor/github.com/bufbuild/connect-go/client.go index ad22304079..d246931fa2 100644 --- a/vendor/github.com/bufbuild/connect-go/client.go +++ b/vendor/github.com/bufbuild/connect-go/client.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/client_stream.go b/vendor/github.com/bufbuild/connect-go/client_stream.go index c7557afa48..8f81b33b55 100644 --- a/vendor/github.com/bufbuild/connect-go/client_stream.go +++ b/vendor/github.com/bufbuild/connect-go/client_stream.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -42,6 +42,9 @@ func (c *ClientStreamForClient[_, _]) Peer() Peer { // RequestHeader returns the request headers. Headers are sent to the server with the // first call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. func (c *ClientStreamForClient[Req, Res]) RequestHeader() http.Header { if c.err != nil { return http.Header{} @@ -189,6 +192,9 @@ func (b *BidiStreamForClient[_, _]) Peer() Peer { // RequestHeader returns the request headers. Headers are sent with the first // call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. func (b *BidiStreamForClient[Req, Res]) RequestHeader() http.Header { if b.err != nil { return http.Header{} diff --git a/vendor/github.com/bufbuild/connect-go/code.go b/vendor/github.com/bufbuild/connect-go/code.go index 557c38051c..a2110f7c28 100644 --- a/vendor/github.com/bufbuild/connect-go/code.go +++ b/vendor/github.com/bufbuild/connect-go/code.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/codec.go b/vendor/github.com/bufbuild/connect-go/codec.go index 5d3f19b32e..a39acd6b81 100644 --- a/vendor/github.com/bufbuild/connect-go/codec.go +++ b/vendor/github.com/bufbuild/connect-go/codec.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/compression.go b/vendor/github.com/bufbuild/connect-go/compression.go index 4e7c23a5c5..56241f4e5c 100644 --- a/vendor/github.com/bufbuild/connect-go/compression.go +++ b/vendor/github.com/bufbuild/connect-go/compression.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
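The reserved-prefix notes added across the streaming types reduce to one rule for application code. A sketch with an invented helper; any generated Connect client stream would do:

package connectexample // hypothetical

import (
    connect "github.com/bufbuild/connect-go"
)

// annotate adds application metadata to an outbound client stream.
func annotate[Req, Res any](stream *connect.ClientStreamForClient[Req, Res]) {
    // Application-defined headers are fine.
    stream.RequestHeader().Set("Acme-Tenant-Id", "tenant-42")
    // Headers like "Connect-Timeout-Ms" or "Grpc-Encoding" must be left to
    // the protocols: anything starting with "Connect-" or "Grpc-" is reserved.
}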
diff --git a/vendor/github.com/bufbuild/connect-go/connect.go b/vendor/github.com/bufbuild/connect-go/connect.go index 6734cb28c4..25baee119c 100644 --- a/vendor/github.com/bufbuild/connect-go/connect.go +++ b/vendor/github.com/bufbuild/connect-go/connect.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -32,7 +32,7 @@ import ( ) // Version is the semantic version of the connect module. -const Version = "1.4.1" +const Version = "1.5.0" // These constants are used in compile-time handshakes with connect's generated // code. @@ -62,6 +62,10 @@ const ( // Receive returns an error wrapping [io.EOF]. Handlers should check for this // using the standard library's [errors.Is]. // +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// // StreamingHandlerConn implementations provided by this module guarantee that // all returned errors can be cast to [*Error] using the standard library's // [errors.As]. @@ -91,6 +95,10 @@ type StreamingHandlerConn interface { // return an error wrapping [io.EOF]; clients may then call Receive to unmarshal // the error. // +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// // StreamingClientConn implementations provided by this module guarantee that // all returned errors can be cast to [*Error] using the standard library's // [errors.As]. @@ -153,7 +161,9 @@ func (r *Request[_]) Peer() Peer { return r.peer } -// Header returns the HTTP headers for this request. +// Header returns the HTTP headers for this request. Headers beginning with +// "Connect-" and "Grpc-" are reserved for use by the Connect and gRPC +// protocols: applications may read them but shouldn't write them. func (r *Request[_]) Header() http.Header { if r.header == nil { r.header = make(http.Header) @@ -167,6 +177,10 @@ func (r *Request[_]) internalOnly() {} // AnyRequest is the common method set of every [Request], regardless of type // parameter. It's used in unary interceptors. // +// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for +// use by the gRPC and Connect protocols: applications may read them but +// shouldn't write them. +// // To preserve our ability to add methods to this interface without breaking // backward compatibility, only types defined in this package can implement // AnyRequest. @@ -205,7 +219,9 @@ func (r *Response[_]) Any() any { return r.Msg } -// Header returns the HTTP headers for this response. +// Header returns the HTTP headers for this response. Headers beginning with +// "Connect-" and "Grpc-" are reserved for use by the Connect and gRPC +// protocols: applications may read them but shouldn't write them. func (r *Response[_]) Header() http.Header { if r.header == nil { r.header = make(http.Header) @@ -216,6 +232,10 @@ func (r *Response[_]) Header() http.Header { // Trailer returns the trailers for this response. Depending on the underlying // RPC protocol, trailers may be sent as HTTP trailers or a protocol-specific // block of in-body metadata. 
+//
+// Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the
+// Connect and gRPC protocols: applications may read them but shouldn't write
+// them.
 func (r *Response[_]) Trailer() http.Header {
 if r.trailer == nil {
 r.trailer = make(http.Header)
@@ -229,6 +249,10 @@ func (r *Response[_]) internalOnly() {}
 // AnyResponse is the common method set of every [Response], regardless of type
 // parameter. It's used in unary interceptors.
 //
+// Headers and trailers beginning with "Connect-" and "Grpc-" are reserved for
+// use by the gRPC and Connect protocols: applications may read them but
+// shouldn't write them.
+//
 // To preserve our ability to add methods to this interface without breaking
 // backward compatibility, only types defined in this package can implement
 // AnyResponse.
diff --git a/vendor/github.com/bufbuild/connect-go/duplex_http_call.go b/vendor/github.com/bufbuild/connect-go/duplex_http_call.go
index c365f34db9..aec8c7698e 100644
--- a/vendor/github.com/bufbuild/connect-go/duplex_http_call.go
+++ b/vendor/github.com/bufbuild/connect-go/duplex_http_call.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Buf Technologies, Inc.
+// Copyright 2021-2023 Buf Technologies, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/bufbuild/connect-go/envelope.go b/vendor/github.com/bufbuild/connect-go/envelope.go
index ecde9570ea..27078bf237 100644
--- a/vendor/github.com/bufbuild/connect-go/envelope.go
+++ b/vendor/github.com/bufbuild/connect-go/envelope.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Buf Technologies, Inc.
+// Copyright 2021-2023 Buf Technologies, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/bufbuild/connect-go/error.go b/vendor/github.com/bufbuild/connect-go/error.go
index a4cd72b9c6..16fb11c34f 100644
--- a/vendor/github.com/bufbuild/connect-go/error.go
+++ b/vendor/github.com/bufbuild/connect-go/error.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Buf Technologies, Inc.
+// Copyright 2021-2023 Buf Technologies, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -118,6 +118,25 @@ func NewError(c Code, underlying error) *Error {
 return &Error{code: c, err: underlying}
}
+// NewWireError is similar to [NewError], but the resulting *Error returns true
+// when tested with [IsWireError].
+//
+// This is useful for clients trying to propagate partial failures from
+// streaming RPCs. Often, these RPCs include error information in their
+// response messages (for example, [gRPC server reflection] and
+// OpenTelemetry's [OTLP]). Clients propagating these errors up the stack
+// should use NewWireError to clarify that the error code, message, and details
+// (if any) were explicitly sent by the server rather than inferred from a
+// lower-level networking error or timeout.
+// +// [gRPC server reflection]: https://github.com/grpc/grpc/blob/v1.49.2/src/proto/grpc/reflection/v1alpha/reflection.proto#L132-L136 +// [OTLP]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/otlp.md#partial-success +func NewWireError(c Code, underlying error) *Error { + err := NewError(c, underlying) + err.wireErr = true + return err +} + // IsWireError checks whether the error was returned by the server, as opposed // to being synthesized by the client. // diff --git a/vendor/github.com/bufbuild/connect-go/error_writer.go b/vendor/github.com/bufbuild/connect-go/error_writer.go index 1c62760f3d..bd9c900df9 100644 --- a/vendor/github.com/bufbuild/connect-go/error_writer.go +++ b/vendor/github.com/bufbuild/connect-go/error_writer.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/handler.go b/vendor/github.com/bufbuild/connect-go/handler.go index 0560e9339e..296632a8e9 100644 --- a/vendor/github.com/bufbuild/connect-go/handler.go +++ b/vendor/github.com/bufbuild/connect-go/handler.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/handler_stream.go b/vendor/github.com/bufbuild/connect-go/handler_stream.go index 5eb7a9530f..00e2833535 100644 --- a/vendor/github.com/bufbuild/connect-go/handler_stream.go +++ b/vendor/github.com/bufbuild/connect-go/handler_stream.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -91,12 +91,18 @@ type ServerStream[Res any] struct { // ResponseHeader returns the response headers. Headers are sent with the first // call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. func (s *ServerStream[Res]) ResponseHeader() http.Header { return s.conn.ResponseHeader() } // ResponseTrailer returns the response trailers. Handlers may write to the // response trailers at any time before returning. +// +// Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. func (s *ServerStream[Res]) ResponseTrailer() http.Header { return s.conn.ResponseTrailer() } @@ -151,12 +157,18 @@ func (b *BidiStream[Req, Res]) Receive() (*Req, error) { // ResponseHeader returns the response headers. Headers are sent with the first // call to Send. +// +// Headers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. func (b *BidiStream[Req, Res]) ResponseHeader() http.Header { return b.conn.ResponseHeader() } // ResponseTrailer returns the response trailers. Handlers may write to the // response trailers at any time before returning. +// +// Trailers beginning with "Connect-" and "Grpc-" are reserved for use by the +// Connect and gRPC protocols. Applications shouldn't write them. 
func (b *BidiStream[Req, Res]) ResponseTrailer() http.Header { return b.conn.ResponseTrailer() } diff --git a/vendor/github.com/bufbuild/connect-go/header.go b/vendor/github.com/bufbuild/connect-go/header.go index d0ca7f51c6..7f05aeaeac 100644 --- a/vendor/github.com/bufbuild/connect-go/header.go +++ b/vendor/github.com/bufbuild/connect-go/header.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/interceptor.go b/vendor/github.com/bufbuild/connect-go/interceptor.go index b9373683c1..7ffe19d505 100644 --- a/vendor/github.com/bufbuild/connect-go/interceptor.go +++ b/vendor/github.com/bufbuild/connect-go/interceptor.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/internal/gen/connectext/grpc/status/v1/status.pb.go b/vendor/github.com/bufbuild/connect-go/internal/gen/connectext/grpc/status/v1/status.pb.go index 39708334e5..fd266220b6 100644 --- a/vendor/github.com/bufbuild/connect-go/internal/gen/connectext/grpc/status/v1/status.pb.go +++ b/vendor/github.com/bufbuild/connect-go/internal/gen/connectext/grpc/status/v1/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/maxbytes.go b/vendor/github.com/bufbuild/connect-go/maxbytes.go index 455dba2baa..8e9505e4ec 100644 --- a/vendor/github.com/bufbuild/connect-go/maxbytes.go +++ b/vendor/github.com/bufbuild/connect-go/maxbytes.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/maxbytes_go118.go b/vendor/github.com/bufbuild/connect-go/maxbytes_go118.go index 32a3e0228f..7a6765f77c 100644 --- a/vendor/github.com/bufbuild/connect-go/maxbytes_go118.go +++ b/vendor/github.com/bufbuild/connect-go/maxbytes_go118.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/connect-go/option.go b/vendor/github.com/bufbuild/connect-go/option.go index 7fd2e8ad53..a997b71af9 100644 --- a/vendor/github.com/bufbuild/connect-go/option.go +++ b/vendor/github.com/bufbuild/connect-go/option.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
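A sketch of the call site NewWireError is designed for: a client unpacks a partial failure carried inside a streaming response message and re-raises it as a server-sent error. The helper name, message text, and counts are invented:

package connectexample // hypothetical

import (
    "fmt"

    connect "github.com/bufbuild/connect-go"
)

func propagatePartialFailure(rejected int64, detail string) error {
    if rejected == 0 {
        return nil
    }
    // The code and message arrived in the response body, so mark the error
    // as sent over the wire rather than synthesized by the client.
    err := connect.NewWireError(
        connect.CodeUnavailable,
        fmt.Errorf("%d records rejected: %s", rejected, detail),
    )
    // Callers up the stack can distinguish it: IsWireError(err) reports true.
    return err
}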
diff --git a/vendor/github.com/bufbuild/connect-go/protobuf_util.go b/vendor/github.com/bufbuild/connect-go/protobuf_util.go
index b98937b4b4..a25d3d0709 100644
--- a/vendor/github.com/bufbuild/connect-go/protobuf_util.go
+++ b/vendor/github.com/bufbuild/connect-go/protobuf_util.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Buf Technologies, Inc.
+// Copyright 2021-2023 Buf Technologies, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/bufbuild/connect-go/protocol.go b/vendor/github.com/bufbuild/connect-go/protocol.go
index 5d2918093b..5fefd790c9 100644
--- a/vendor/github.com/bufbuild/connect-go/protocol.go
+++ b/vendor/github.com/bufbuild/connect-go/protocol.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Buf Technologies, Inc.
+// Copyright 2021-2023 Buf Technologies, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -320,5 +320,15 @@ func canonicalizeContentType(ct string) string {
 if err != nil {
 return ct
 }
+
+ // According to RFC 9110 Section 8.3.2, the charset parameter value should be treated as case-insensitive.
+ // mime.FormatMediaType canonicalizes parameter names, but not parameter values,
+ // because the case sensitivity of a parameter value depends on its semantics.
+ // Therefore, the charset parameter value is canonicalized here.
+ // Reference: https://httpwg.org/specs/rfc9110.html#rfc.section.8.3.2
+ if charset, ok := params["charset"]; ok {
+ params["charset"] = strings.ToLower(charset)
+ }
+
 return mime.FormatMediaType(base, params)
}
diff --git a/vendor/github.com/bufbuild/connect-go/protocol_connect.go b/vendor/github.com/bufbuild/connect-go/protocol_connect.go
index 796354f085..db5ff27b3f 100644
--- a/vendor/github.com/bufbuild/connect-go/protocol_connect.go
+++ b/vendor/github.com/bufbuild/connect-go/protocol_connect.go
@@ -1,4 +1,4 @@
-// Copyright 2021-2022 Buf Technologies, Inc.
+// Copyright 2021-2023 Buf Technologies, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -56,10 +56,10 @@ func (*protocolConnect) NewHandler(params *protocolHandlerParams) protocolHandle contentTypes := make(map[string]struct{}) for _, name := range params.Codecs.Names() { if params.Spec.StreamType == StreamTypeUnary { - contentTypes[connectUnaryContentTypePrefix+name] = struct{}{} + contentTypes[canonicalizeContentType(connectUnaryContentTypePrefix+name)] = struct{}{} continue } - contentTypes[connectStreamingContentTypePrefix+name] = struct{}{} + contentTypes[canonicalizeContentType(connectStreamingContentTypePrefix+name)] = struct{}{} } return &connectHandler{ protocolHandlerParams: *params, @@ -943,8 +943,7 @@ func (e *connectWireError) asError() *Error { if e.Code < minCode || e.Code > maxCode { e.Code = CodeUnknown } - err := NewError(e.Code, errors.New(e.Message)) - err.wireErr = true + err := NewWireError(e.Code, errors.New(e.Message)) if len(e.Details) > 0 { err.details = make([]*ErrorDetail, len(e.Details)) for i, detail := range e.Details { diff --git a/vendor/github.com/bufbuild/connect-go/protocol_grpc.go b/vendor/github.com/bufbuild/connect-go/protocol_grpc.go index 35235468f8..5ef09c1e77 100644 --- a/vendor/github.com/bufbuild/connect-go/protocol_grpc.go +++ b/vendor/github.com/bufbuild/connect-go/protocol_grpc.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -85,7 +85,7 @@ func (g *protocolGRPC) NewHandler(params *protocolHandlerParams) protocolHandler } contentTypes := make(map[string]struct{}) for _, name := range params.Codecs.Names() { - contentTypes[prefix+name] = struct{}{} + contentTypes[canonicalizeContentType(prefix+name)] = struct{}{} } if params.Codecs.Get(codecNameProto) != nil { contentTypes[bare] = struct{}{} @@ -701,8 +701,7 @@ func grpcErrorFromTrailer(bufferPool *bufferPool, protobuf Codec, trailer http.H return errorf(CodeInternal, "gRPC protocol error: invalid error code %q", codeHeader) } message := grpcPercentDecode(bufferPool, trailer.Get(grpcHeaderMessage)) - retErr := NewError(Code(code), errors.New(message)) - retErr.wireErr = true + retErr := NewWireError(Code(code), errors.New(message)) detailsBinaryEncoded := trailer.Get(grpcHeaderDetails) if len(detailsBinaryEncoded) > 0 { diff --git a/vendor/github.com/bufbuild/connect-go/recover.go b/vendor/github.com/bufbuild/connect-go/recover.go index c126d36acc..5cda08a9ac 100644 --- a/vendor/github.com/bufbuild/connect-go/recover.go +++ b/vendor/github.com/bufbuild/connect-go/recover.go @@ -1,4 +1,4 @@ -// Copyright 2021-2022 Buf Technologies, Inc. +// Copyright 2021-2023 Buf Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
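The charset handling above relies only on the standard mime package, so its effect can be reproduced standalone. This mirrors the vendored logic for illustration and is not an exported API:

package main

import (
    "fmt"
    "mime"
    "strings"
)

func main() {
    // mime.ParseMediaType lower-cases parameter names, and
    // mime.FormatMediaType canonicalizes names but not values, so an
    // upper-case charset value survives a round trip unless lowered
    // explicitly, as the vendored change now does.
    base, params, err := mime.ParseMediaType("application/grpc+proto; Charset=UTF-8")
    if err != nil {
        panic(err)
    }
    if charset, ok := params["charset"]; ok {
        params["charset"] = strings.ToLower(charset)
    }
    fmt.Println(mime.FormatMediaType(base, params)) // application/grpc+proto; charset=utf-8
}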
diff --git a/vendor/github.com/bufbuild/protocompile/.golangci.yml b/vendor/github.com/bufbuild/protocompile/.golangci.yml index 67377d6956..37a369a635 100644 --- a/vendor/github.com/bufbuild/protocompile/.golangci.yml +++ b/vendor/github.com/bufbuild/protocompile/.golangci.yml @@ -48,7 +48,6 @@ linters: - goconst - gocyclo - goerr113 - - gosec - interfacebloat - nestif - nilerr @@ -93,6 +92,8 @@ issues: linters: - paralleltest # dupword reports several errors in .proto test fixtures + # gosec reports a few minor issues in tests - path: _test\.go linters: - dupword + - gosec diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE index 261eeb9e9f..04cf1e316a 100644 --- a/vendor/github.com/bufbuild/protocompile/LICENSE +++ b/vendor/github.com/bufbuild/protocompile/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2020-2022 Buf Technologies, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/bufbuild/protocompile/Makefile b/vendor/github.com/bufbuild/protocompile/Makefile index 47d87c7dd1..bd3b85ba3c 100644 --- a/vendor/github.com/bufbuild/protocompile/Makefile +++ b/vendor/github.com/bufbuild/protocompile/Makefile @@ -76,7 +76,7 @@ lintfix: $(BIN)/golangci-lint ## Automatically fix some lint errors .PHONY: generate generate: $(BIN)/license-header $(BIN)/goyacc test-descriptors ## Regenerate code and licenses - PATH=$(BIN):$(PATH) $(GO) generate ./... + PATH="$(BIN):$(PATH)" $(GO) generate ./... @# We want to operate on a list of modified and new files, excluding @# deleted and ignored files. git-ls-files can't do this alone. 
comm -23 takes @# two files and prints the union, dropping lines common to both (-3) and diff --git a/vendor/github.com/bufbuild/protocompile/compiler.go b/vendor/github.com/bufbuild/protocompile/compiler.go index e188f46d0f..b219621c81 100644 --- a/vendor/github.com/bufbuild/protocompile/compiler.go +++ b/vendor/github.com/bufbuild/protocompile/compiler.go @@ -533,6 +533,13 @@ func (t *task) link(parseRes parser.Result, deps linker.Files) (linker.File, err } func (t *task) asParseResult(name string, r SearchResult) (parser.Result, error) { + if r.ParseResult != nil { + if r.ParseResult.FileDescriptorProto().GetName() != name { + return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.ParseResult.FileDescriptorProto().GetName()) + } + return r.ParseResult, nil + } + if r.Proto != nil { if r.Proto.GetName() != name { return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Proto.GetName()) diff --git a/vendor/github.com/bufbuild/protocompile/go.work.sum b/vendor/github.com/bufbuild/protocompile/go.work.sum index f9653baadf..5a5be40fa6 100644 --- a/vendor/github.com/bufbuild/protocompile/go.work.sum +++ b/vendor/github.com/bufbuild/protocompile/go.work.sum @@ -10,11 +10,165 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= 
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dlp v1.6.0/go.mod 
h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networksecurity 
v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/serviceusage 
v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -29,6 +183,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= @@ -43,19 +198,32 @@ github.com/google/pprof 
v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c h1:XImQJfpJLmGEEd8ll5yPVyL/aEvmgGHW4WYTyNseLOM= +github.com/jhump/protoreflect v1.13.1-0.20220928232736-101791cb1b4c/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= +github.com/jhump/protoreflect v1.14.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -67,7 +235,15 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -77,7 +253,15 @@ golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -96,14 +280,25 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= @@ -122,12 +317,23 @@ google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -162,8 +368,23 @@ google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= @@ -177,8 +398,12 @@ google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ5 google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/bufbuild/protocompile/linker/files.go b/vendor/github.com/bufbuild/protocompile/linker/files.go index 06492feca6..aad534756b 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/files.go +++ b/vendor/github.com/bufbuild/protocompile/linker/files.go @@ -78,7 +78,7 @@ func newFile(f protoreflect.FileDescriptor, deps Files) (File, error) { if err != nil { return nil, err } - return file{ + return &file{ FileDescriptor: f, descs: descs, deps: deps, @@ -133,23 +133,23 @@ type file struct { deps Files } -func (f file) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor { +func (f *file) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor { return f.descs[name] } -func (f file) FindImportByPath(path string) File { +func (f *file) FindImportByPath(path string) File { return f.deps.FindFileByPath(path) } -func (f file) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { +func (f *file) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor { return findExtension(f, msg, tag) } -func (f file) importsAsFiles() Files { +func (f *file) importsAsFiles() Files { return f.deps } -var _ File = file{} +var _ File = (*file)(nil) // Files represents a set of protobuf files. It is a slice of File values, but // also provides a method for easily looking up files by path and name. 
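The files.go change above moves the file type from value receivers to pointer receivers and updates the compile-time assertion from file{} to (*file)(nil). A generic sketch of that idiom, with hypothetical names rather than protocompile's:

package main

import "fmt"

type Greeter interface{ Greet() string }

type greeter struct{ name string }

// With pointer receivers, only *greeter satisfies Greeter, so values must be
// handed out as pointers -- exactly the file -> *file switch above.
func (g *greeter) Greet() string { return "hello, " + g.name }

// Compile-time check: if *greeter ever stops implementing Greeter (say a
// method is renamed), the build fails here rather than at a distant use site.
var _ Greeter = (*greeter)(nil)

func main() {
	var g Greeter = &greeter{name: "linker"}
	fmt.Println(g.Greet())
}

The (*T)(nil) form costs nothing at runtime: it asserts the implementation without allocating a value, which matters once the methods require a pointer.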
diff --git a/vendor/github.com/bufbuild/protocompile/linker/symbols.go b/vendor/github.com/bufbuild/protocompile/linker/symbols.go index d8b8bc3754..ca2ffe478a 100644 --- a/vendor/github.com/bufbuild/protocompile/linker/symbols.go +++ b/vendor/github.com/bufbuild/protocompile/linker/symbols.go @@ -67,7 +67,7 @@ func (s *Symbols) Import(fd protoreflect.FileDescriptor, handler *reporter.Handl return nil } - if f, ok := fd.(file); ok { + if f, ok := fd.(*file); ok { // unwrap any file instance fd = f.FileDescriptor } diff --git a/vendor/github.com/bufbuild/protocompile/parser/lexer.go b/vendor/github.com/bufbuild/protocompile/parser/lexer.go index e90779a2c7..b674c10fa7 100644 --- a/vendor/github.com/bufbuild/protocompile/parser/lexer.go +++ b/vendor/github.com/bufbuild/protocompile/parser/lexer.go @@ -495,6 +495,18 @@ func (l *protoLex) readIdentifier() { func (l *protoLex) readStringLiteral(quote rune) (string, error) { var buf bytes.Buffer + var escapeErrors []reporter.ErrorWithPos + reportErr := func(msg, badEscape string) { + var err error + if strings.HasSuffix(msg, "%s") { + err = fmt.Errorf(msg, badEscape) + } else { + err = errors.New(msg) + } + // we've now consumed the bad escape and lexer position is after it, so we need + // to back up to the beginning of the escape to report the correct position + escapeErrors = append(escapeErrors, l.errWithCurrentPos(err, -len(badEscape))) + } for { c, _, err := l.input.readRune() if err != nil { @@ -510,7 +522,8 @@ func (l *protoLex) readStringLiteral(quote rune) (string, error) { break } if c == 0 { - return "", errors.New("null character ('\\0') not allowed in string literal") + reportErr("null character ('\\0') not allowed in string literal", string(rune(0))) + continue } if c == '\\' { // escape sequence @@ -521,10 +534,15 @@ func (l *protoLex) readStringLiteral(quote rune) (string, error) { switch { case c == 'x' || c == 'X': // hex escape - c, _, err := l.input.readRune() + c1, sz1, err := l.input.readRune() if err != nil { return "", err } + if c1 == quote || c1 == '\\' { + l.input.unreadRune(sz1) + reportErr("invalid hex escape: %s", "\\"+string(c)) + continue + } c2, sz2, err := l.input.readRune() if err != nil { return "", err @@ -532,13 +550,14 @@ func (l *protoLex) readStringLiteral(quote rune) (string, error) { var hex string if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') { l.input.unreadRune(sz2) - hex = string(c) + hex = string(c1) } else { - hex = string([]rune{c, c2}) + hex = string([]rune{c1, c2}) } i, err := strconv.ParseInt(hex, 16, 32) if err != nil { - return "", fmt.Errorf("invalid hex escape: \\x%q", hex) + reportErr("invalid hex escape: %s", "\\"+string(c)+hex) + continue } buf.WriteByte(byte(i)) case c >= '0' && c <= '7': @@ -565,43 +584,68 @@ func (l *protoLex) readStringLiteral(quote rune) (string, error) { } i, err := strconv.ParseInt(octal, 8, 32) if err != nil { - return "", fmt.Errorf("invalid octal escape: \\%q", octal) + reportErr("invalid octal escape: %s", "\\"+octal) + continue } if i > 0xff { - return "", fmt.Errorf("octal escape is out range, must be between 0 and 377: \\%q", octal) + reportErr("octal escape is out of range, must be between 0 and 377: %s", "\\"+octal) + continue } buf.WriteByte(byte(i)) case c == 'u': // short unicode escape u := make([]rune, 4) for i := range u { - c, _, err := l.input.readRune() + c2, sz2, err := l.input.readRune() if err != nil { return "", err } - u[i] = c + if c2 == quote || c2 == '\\' { + l.input.unreadRune(sz2) + u = u[:i] + break + } +
u[i] = c2 } - i, err := strconv.ParseInt(string(u), 16, 32) + codepointStr := string(u) + if len(u) < 4 { + reportErr("invalid unicode escape: %s", "\\u"+codepointStr) + continue + } + i, err := strconv.ParseInt(codepointStr, 16, 32) if err != nil { - return "", fmt.Errorf("invalid unicode escape: \\u%q", string(u)) + reportErr("invalid unicode escape: %s", "\\u"+codepointStr) + continue } buf.WriteRune(rune(i)) case c == 'U': // long unicode escape u := make([]rune, 8) for i := range u { - c, _, err := l.input.readRune() + c2, sz2, err := l.input.readRune() if err != nil { return "", err } - u[i] = c + if c2 == quote || c2 == '\\' { + l.input.unreadRune(sz2) + u = u[:i] + break + } + u[i] = c2 + } + codepointStr := string(u) + if len(u) < 8 { + reportErr("invalid unicode escape: %s", "\\U"+codepointStr) + continue } i, err := strconv.ParseInt(string(u), 16, 32) if err != nil { - return "", fmt.Errorf("invalid unicode escape: \\U%q", string(u)) + reportErr("invalid unicode escape: %s", "\\U"+codepointStr) + continue } if i > 0x10ffff || i < 0 { - return "", fmt.Errorf("unicode escape is out of range, must be between 0 and 0x10ffff: \\U%q", string(u)) + reportErr("unicode escape is out of range, must be between 0 and 0x10ffff: %s", "\\U"+codepointStr) + continue } buf.WriteRune(rune(i)) case c == 'a': @@ -627,12 +671,22 @@ func (l *protoLex) readStringLiteral(quote rune) (string, error) { case c == '?': buf.WriteByte('?') default: - return "", fmt.Errorf("invalid escape sequence: %q", "\\"+string(c)) + reportErr("invalid escape sequence: %s", "\\"+string(c)) + continue } } else { buf.WriteRune(c) } } + if len(escapeErrors) > 0 { + // Report all but the last. Return that one for caller to report. + // If error handler does not accept all of them, it will at least + // get the first error. + for i := 0; i < len(escapeErrors)-1; i++ { + _ = l.addSourceError(escapeErrors[i]) + } + return "", escapeErrors[len(escapeErrors)-1] + } return buf.String(), nil } @@ -691,3 +745,11 @@ func (l *protoLex) addSourceError(err error) reporter.ErrorWithPos { func (l *protoLex) Error(s string) { _ = l.addSourceError(errors.New(s)) } + +func (l *protoLex) errWithCurrentPos(err error, offset int) reporter.ErrorWithPos { + if ewp, ok := err.(reporter.ErrorWithPos); ok { + return ewp + } + pos := l.info.SourcePos(l.input.offset() + offset) + return reporter.Error(pos, err) +} diff --git a/vendor/github.com/bufbuild/protocompile/resolver.go b/vendor/github.com/bufbuild/protocompile/resolver.go index 755554f0d4..706b873bda 100644 --- a/vendor/github.com/bufbuild/protocompile/resolver.go +++ b/vendor/github.com/bufbuild/protocompile/resolver.go @@ -15,7 +15,9 @@ package protocompile import ( + "errors" "io" + "io/fs" "os" "path/filepath" "strings" @@ -25,6 +27,7 @@ import ( "google.golang.org/protobuf/types/descriptorpb" "github.com/bufbuild/protocompile/ast" + "github.com/bufbuild/protocompile/parser" ) // Resolver is used by the compiler to resolve a proto source file name @@ -44,7 +47,7 @@ type Resolver interface { // the various fields must be set, based on what is available for a file. If // multiple fields are set, the compiler prefers them in opposite order listed: // so it uses a descriptor if present and only falls back to source if nothing -// else if available. +// else is available. type SearchResult struct { // Represents source code for the file. This should be nil if source code // is not available. 
If no field below is set, then the compiler will parse @@ -57,6 +60,13 @@ type SearchResult struct { // set, then the compiler will link this proto with its dependencies to // produce a linked descriptor. Proto *descriptorpb.FileDescriptorProto + // A parse result for the file. This packages both an AST and a descriptor + // proto in one. When a parser result is available, it is more efficient + // than using an AST search result, since the descriptor proto need not be + // re-created. And it provides better error messages than a descriptor proto + // search result, since the AST has greater fidelity with regard to source + // positions (even if the descriptor proto includes source code info). + ParseResult parser.Result // A fully linked descriptor that represents the file. If this field is set, // then the compiler has no additional work to do for this file as it is // already compiled. @@ -131,7 +141,7 @@ func (r *SourceResolver) FindFileByPath(path string) (SearchResult, error) { for _, importPath := range r.ImportPaths { reader, err := r.accessFile(filepath.Join(importPath, path)) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { e = err continue } diff --git a/vendor/github.com/cavaliergopher/cpio/.travis.yml b/vendor/github.com/cavaliergopher/cpio/.travis.yml new file mode 100644 index 0000000000..7189a09ad1 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: +- 1.17 +- 1.16 +- 1.15 +- 1.14 +- 1.13 +- 1.12 +- 1.11 diff --git a/vendor/github.com/cavaliergopher/cpio/LICENSE b/vendor/github.com/cavaliergopher/cpio/LICENSE new file mode 100644 index 0000000000..7f377a1b68 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2017 Ryan Armstrong. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
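On the SourceResolver change above (os.IsNotExist replaced by errors.Is with fs.ErrNotExist): a quick standard-library-only sketch of the difference. os.IsNotExist predates error wrapping and does not unwrap, so it misses wrapped errors; errors.Is walks the whole chain. The file name and wrapping message here are illustrative.

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	_, err := os.Open("definitely/missing.proto")

	// Wrap the error, as helper layers around a resolver often do.
	wrapped := fmt.Errorf("searching import path: %w", err)

	fmt.Println(os.IsNotExist(wrapped))             // false: no unwrapping
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: chain is walked
}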
diff --git a/vendor/github.com/cavaliergopher/cpio/README.md b/vendor/github.com/cavaliergopher/cpio/README.md new file mode 100644 index 0000000000..6613d23197 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/README.md @@ -0,0 +1,9 @@ +# cpio +[![Go Reference](https://pkg.go.dev/badge/github.com/cavaliergopher/cpio.svg)](https://pkg.go.dev/github.com/cavaliergopher/cpio) [![Build Status](https://app.travis-ci.com/cavaliergopher/cpio.svg?branch=main)](https://app.travis-ci.com/cavaliergopher/cpio) [![Go Report Card](https://goreportcard.com/badge/github.com/cavaliergopher/cpio)](https://goreportcard.com/report/github.com/cavaliergopher/cpio) + +Package cpio provides readers and writers for the CPIO archive file format. + +Currently, only the SVR4 (New ASCII) format is supported, both with and without +checksums. + +Copyright 2021, Ryan Armstrong diff --git a/vendor/github.com/cavaliergopher/cpio/doc.go b/vendor/github.com/cavaliergopher/cpio/doc.go new file mode 100644 index 0000000000..293170bb41 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/doc.go @@ -0,0 +1,9 @@ +/* +Package cpio provides readers and writers for CPIO archives. Currently, only +the SVR4 (New ASCII) format is supported, both with and without checksums. + +This package aims to feel like Go's archive/tar package. + +See the CPIO man page: https://www.freebsd.org/cgi/man.cgi?query=cpio&sektion=5 +*/ +package cpio diff --git a/vendor/github.com/cavaliergopher/cpio/fileinfo.go b/vendor/github.com/cavaliergopher/cpio/fileinfo.go new file mode 100644 index 0000000000..7035801550 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/fileinfo.go @@ -0,0 +1,58 @@ +package cpio + +import ( + "os" + "path" + "time" +) + +// fileInfo implements fs.FileInfo. +type fileInfo struct { + h *Header +} + +// Name returns the base name of the file. +func (fi fileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +func (fi fileInfo) Size() int64 { return fi.h.Size } +func (fi fileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi fileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi fileInfo) Sys() interface{} { return fi.h } + +func (fi fileInfo) Mode() (mode os.FileMode) { + mode = os.FileMode(fi.h.Mode).Perm() + if fi.h.Mode&ModeSetuid != 0 { + mode |= os.ModeSetuid + } + if fi.h.Mode&ModeSetgid != 0 { + mode |= os.ModeSetgid + } + if fi.h.Mode&ModeSticky != 0 { + mode |= os.ModeSticky + } + m := os.FileMode(fi.h.Mode) & ModeType + if m == TypeDir { + mode |= os.ModeDir + } + if m == TypeFifo { + mode |= os.ModeNamedPipe + } + if m == TypeSymlink { + mode |= os.ModeSymlink + } + if m == TypeBlock { + mode |= os.ModeDevice + } + if m == TypeChar { + mode |= os.ModeDevice | os.ModeCharDevice + } + if m == TypeSocket { + mode |= os.ModeSocket + } + return mode +} diff --git a/vendor/github.com/cavaliergopher/cpio/hash.go b/vendor/github.com/cavaliergopher/cpio/hash.go new file mode 100644 index 0000000000..0af86d99ea --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/hash.go @@ -0,0 +1,45 @@ +package cpio + +import ( + "encoding/binary" + "hash" +) + +type digest struct { + sum uint32 +} + +// NewHash returns a new hash.Hash32 for computing SVR4 checksums.
+func NewHash() hash.Hash32 { + return &digest{} +} + +func (d *digest) Write(p []byte) (n int, err error) { + for _, b := range p { + d.sum += uint32(b & 0xFF) + } + + return len(p), nil +} + +func (d *digest) Sum(b []byte) []byte { + out := [4]byte{} + binary.LittleEndian.PutUint32(out[:], d.sum) + return append(b, out[:]...) +} + +func (d *digest) Sum32() uint32 { + return d.sum +} + +func (d *digest) Reset() { + d.sum = 0 +} + +func (d *digest) Size() int { + return 4 +} + +func (d *digest) BlockSize() int { + return 1 +} diff --git a/vendor/github.com/cavaliergopher/cpio/header.go b/vendor/github.com/cavaliergopher/cpio/header.go new file mode 100644 index 0000000000..d24e301e27 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/header.go @@ -0,0 +1,156 @@ +package cpio + +import ( + "errors" + "fmt" + "os" + "time" +) + +const ( + // TypeReg indicates a regular file + TypeReg = 0100000 + + // The following are header-only flags and may not have a data body. + TypeSocket = 0140000 // Socket + TypeSymlink = 0120000 // Symbolic link + TypeBlock = 060000 // Block device node + TypeDir = 040000 // Directory + TypeChar = 020000 // Character device node + TypeFifo = 010000 // FIFO node +) + +const ( + ModeSetuid = 04000 // Set uid + ModeSetgid = 02000 // Set gid + ModeSticky = 01000 // Save text (sticky bit) + + ModeType = 0170000 // Mask for the type bits + ModePerm = 0777 // Unix permission bits +) + +const ( + // headerEOF is the value of the filename of the last header in a CPIO archive. + headerEOF = "TRAILER!!!" +) + +var ( + // ErrHeader indicates there was an error decoding a CPIO header entry. + ErrHeader = errors.New("cpio: invalid cpio header") +) + +// A FileMode represents a file's mode and permission bits. +type FileMode uint32 + +func (m FileMode) String() string { + return fmt.Sprintf("%#o", m) +} + +// IsDir reports whether m describes a directory. That is, it tests for the +// TypeDir bit being set in m. +func (m FileMode) IsDir() bool { + return m&TypeDir != 0 +} + +// IsRegular reports whether m describes a regular file. That is, it tests for +// the TypeReg bit being set in m. +func (m FileMode) IsRegular() bool { + return m&^ModePerm == TypeReg +} + +// Perm returns the Unix permission bits in m. +func (m FileMode) Perm() FileMode { + return m & ModePerm +} + +// A Header represents a single header in a CPIO archive. Some fields may not be +// populated. +// +// For forward compatibility, users that retrieve a Header from Reader.Next, +// mutate it in some ways, and then pass it back to Writer.WriteHeader should do +// so by creating a new Header and copying the fields that they are interested +// in preserving. +type Header struct { + Name string // Name of the file entry + Linkname string // Target name of link (valid for TypeLink or TypeSymlink) + Links int // Number of inbound links + + Size int64 // Size in bytes + Mode FileMode // Permission and mode bits + Uid int // User id of the owner + Guid int // Group id of the owner + + ModTime time.Time // Modification time + + Checksum uint32 // Computed checksum + + DeviceID int + Inode int64 // Inode number + + pad int64 // bytes to pad before next header +} + +// FileInfo returns an fs.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return fileInfo{h} +} + +// FileInfoHeader creates a partially-populated Header from fi. If fi describes +// a symlink, FileInfoHeader records link as the link target. If fi describes a +// directory, a slash is appended to the name. 
+// +// Since fs.FileInfo's Name method returns only the base name of the file it +// describes, it may be necessary to modify Header.Name to provide the full path +// name of the file. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("cpio: FileInfo is nil") + } + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Return a copy of the + // original Header. + h := &Header{} + *h = *sys + return h, nil + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + Mode: FileMode(fi.Mode().Perm()), // or'd with Mode* constants later + ModTime: fi.ModTime(), + Size: fi.Size(), + } + switch { + case fm.IsRegular(): + h.Mode |= TypeReg + case fi.IsDir(): + h.Mode |= TypeDir + h.Name += "/" + h.Size = 0 + case fm&os.ModeSymlink != 0: + h.Mode |= TypeSymlink + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= TypeChar + } else { + h.Mode |= TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Mode |= TypeFifo + case fm&os.ModeSocket != 0: + h.Mode |= TypeSocket + default: + return nil, fmt.Errorf("cpio: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= ModeSetuid + } + if fm&os.ModeSetgid != 0 { + h.Mode |= ModeSetgid + } + if fm&os.ModeSticky != 0 { + h.Mode |= ModeSticky + } + return h, nil +} diff --git a/vendor/github.com/cavaliergopher/cpio/reader.go b/vendor/github.com/cavaliergopher/cpio/reader.go new file mode 100644 index 0000000000..f2e5bec49c --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/reader.go @@ -0,0 +1,72 @@ +package cpio + +import ( + "io" + "io/ioutil" +) + +// Reader provides sequential access to the contents of a CPIO archive. +// Reader.Next advances to the next file in the archive (including the first), +// and then Reader can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader // underlying file reader + hdr *Header // current Header + eof int64 // bytes until the end of the current file +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + } +} + +// Read reads from the current file in the CPIO archive. It returns (0, io.EOF) +// when it reaches the end of that file, until Next is called to advance to the +// next file. +// +// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what the +// Header.Size claims. +func (r *Reader) Read(p []byte) (n int, err error) { + if r.hdr == nil || r.eof == 0 { + return 0, io.EOF + } + rn := len(p) + if r.eof < int64(rn) { + rn = int(r.eof) + } + n, err = r.r.Read(p[0:rn]) + r.eof -= int64(n) + return +} + +// Next advances to the next entry in the CPIO archive. The Header.Size +// determines how many bytes can be read for the next file. Any remaining data +// in the current file is automatically discarded. +// +// io.EOF is returned at the end of the input. 
+func (r *Reader) Next() (*Header, error) { + if r.hdr == nil { + return r.next() + } + skp := r.eof + r.hdr.pad + if skp > 0 { + _, err := io.CopyN(ioutil.Discard, r.r, skp) + if err != nil { + return nil, err + } + } + return r.next() +} + +func (r *Reader) next() (*Header, error) { + r.eof = 0 + hdr, err := readSVR4Header(r.r) + if err != nil { + return nil, err + } + r.hdr = hdr + r.eof = hdr.Size + return hdr, nil +} diff --git a/vendor/github.com/cavaliergopher/cpio/svr4.go b/vendor/github.com/cavaliergopher/cpio/svr4.go new file mode 100644 index 0000000000..29866bf5fd --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/svr4.go @@ -0,0 +1,152 @@ +package cpio + +import ( + "bytes" + "fmt" + "io" + "strconv" + "time" +) + +const ( + svr4MaxNameSize = 4096 // MAX_PATH + svr4MaxFileSize = 4294967295 +) + +var svr4Magic = []byte{0x30, 0x37, 0x30, 0x37, 0x30, 0x31} // 070701 + +func readHex(s string) int64 { + // errors are ignored and 0 returned + i, _ := strconv.ParseInt(s, 16, 64) + return i +} + +func writeHex(b []byte, i int64) { + // i needs to be in range of uint32 + copy(b, fmt.Sprintf("%08X", i)) +} + +func readSVR4Header(r io.Reader) (*Header, error) { + var buf [110]byte + if _, err := io.ReadFull(r, buf[:]); err != nil { + return nil, err + } + + // TODO: check endianness + + // check magic + hasCRC := false + if !bytes.HasPrefix(buf[:], svr4Magic[:5]) { + return nil, ErrHeader + } + if buf[5] == '2' { + hasCRC = true + } else if buf[5] != '1' { + return nil, ErrHeader + } + + asc := string(buf[:]) + hdr := &Header{ + Inode: readHex(asc[6:14]), + Mode: FileMode(readHex(asc[14:22])), + Uid: int(readHex(asc[22:30])), + Guid: int(readHex(asc[30:38])), + Links: int(readHex(asc[38:46])), + ModTime: time.Unix(readHex(asc[46:54]), 0), + Size: readHex(asc[54:62]), + } + if hdr.Size > svr4MaxFileSize { + return nil, ErrHeader + } + nameSize := readHex(asc[94:102]) + if nameSize < 1 || nameSize > svr4MaxNameSize { + return nil, ErrHeader + } + hdr.Checksum = uint32(readHex(asc[102:110])) + if !hasCRC && hdr.Checksum != 0 { + return nil, ErrHeader + } + + name := make([]byte, nameSize) + if _, err := io.ReadFull(r, name); err != nil { + return nil, err + } + hdr.Name = string(name[:nameSize-1]) + if hdr.Name == headerEOF { + return nil, io.EOF + } + + // store padding between end of file and next header + hdr.pad = (4 - (hdr.Size % 4)) % 4 + + // skip to end of header/start of file + pad := (4 - (len(buf)+len(name))%4) % 4 + if pad > 0 { + if _, err := io.ReadFull(r, buf[:pad]); err != nil { + return nil, err + } + } + + // read link name + if hdr.Mode&^ModePerm == TypeSymlink { + if hdr.Size < 1 || hdr.Size > svr4MaxNameSize { + return nil, ErrHeader + } + b := make([]byte, hdr.Size) + if _, err := io.ReadFull(r, b); err != nil { + return nil, err + } + hdr.Linkname = string(b) + hdr.Size = 0 + } + + return hdr, nil +} + +func writeSVR4Header(w io.Writer, hdr *Header) (pad int64, err error) { + var hdrBuf [110]byte + for i := 0; i < len(hdrBuf); i++ { + hdrBuf[i] = '0' + } + magic := svr4Magic + if hdr.Checksum != 0 { + magic[5] = 0x32 + } + copy(hdrBuf[:], magic) + writeHex(hdrBuf[6:14], hdr.Inode) + writeHex(hdrBuf[14:22], int64(hdr.Mode)) + writeHex(hdrBuf[22:30], int64(hdr.Uid)) + writeHex(hdrBuf[30:38], int64(hdr.Guid)) + writeHex(hdrBuf[38:46], int64(hdr.Links)) + if !hdr.ModTime.IsZero() { + writeHex(hdrBuf[46:54], hdr.ModTime.Unix()) + } + writeHex(hdrBuf[54:62], hdr.Size) + writeHex(hdrBuf[94:102], int64(len(hdr.Name)+1)) + if hdr.Checksum != 0 { + 
writeHex(hdrBuf[102:110], int64(hdr.Checksum)) + } + + // write header + _, err = w.Write(hdrBuf[:]) + if err != nil { + return + } + + // write filename + _, err = io.WriteString(w, hdr.Name+"\x00") + if err != nil { + return + } + + // pad to end of filename + npad := (4 - ((len(hdrBuf) + len(hdr.Name) + 1) % 4)) % 4 + _, err = w.Write(zeroBlock[:npad]) + if err != nil { + return + } + + // compute padding to end of file + pad = (4 - (hdr.Size % 4)) % 4 + return +} diff --git a/vendor/github.com/cavaliergopher/cpio/writer.go b/vendor/github.com/cavaliergopher/cpio/writer.go new file mode 100644 index 0000000000..7f2bdad841 --- /dev/null +++ b/vendor/github.com/cavaliergopher/cpio/writer.go @@ -0,0 +1,140 @@ +package cpio + +import ( + "errors" + "fmt" + "io" +) + +var ( + // ErrWriteTooLong indicates that an attempt was made to write more than + // Header.Size bytes to the current file. + ErrWriteTooLong = errors.New("cpio: write too long") + + // ErrWriteAfterClose indicates that an attempt was made to write to the + // CPIO archive after it was closed. + ErrWriteAfterClose = errors.New("cpio: write after close") +) + +var trailer = &Header{ + Name: headerEOF, + Links: 1, +} + +var zeroBlock [4]byte + +// Writer provides sequential writing of a CPIO archive. Write.WriteHeader +// begins a new file with the provided Header, and then Writer can be treated as +// an io.Writer to supply that file's data. +type Writer struct { + w io.Writer + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + inode int64 + err error + closed bool +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{w: w} +} + +// Flush finishes writing the current file's block padding. The current file +// must be fully written before Flush can be called. +// +// This is unnecessary as the next call to WriteHeader or Close will implicitly +// flush out the file's padding. +func (w *Writer) Flush() error { + if w.nb > 0 { + w.err = fmt.Errorf("cpio: missed writing %d bytes", w.nb) + return w.err + } + _, w.err = w.w.Write(zeroBlock[:w.pad]) + if w.err != nil { + return w.err + } + w.nb = 0 + w.pad = 0 + return w.err +} + +// WriteHeader writes hdr and prepares to accept the file's contents. The +// Header.Size determines how many bytes can be written for the next file. If +// the current file is not fully written, then this returns an error. This +// implicitly flushes any padding necessary before writing the header. +func (w *Writer) WriteHeader(hdr *Header) (err error) { + if w.closed { + return ErrWriteAfterClose + } + if w.err == nil { + w.Flush() + } + if w.err != nil { + return w.err + } + if hdr.Name != headerEOF { + // TODO: should we be mutating hdr here? + // ensure all inodes are unique + w.inode++ + if hdr.Inode == 0 { + hdr.Inode = w.inode + } + + // ensure file type is set + if hdr.Mode&^ModePerm == 0 { + hdr.Mode |= TypeReg + } + + // ensure regular files have at least 1 inbound link + if hdr.Links < 1 && hdr.Mode.IsRegular() { + hdr.Links = 1 + } + } + + w.nb = hdr.Size + w.pad, w.err = writeSVR4Header(w.w, hdr) + return +} + +// Write writes to the current file in the CPIO archive. Write returns the error +// ErrWriteTooLong if more than Header.Size bytes are written after WriteHeader. +// +// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless of +// what the Header.Size claims. 
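The Writer side mirrors the Reader: WriteHeader, then at most Header.Size bytes of Write, then Close to emit the trailer entry. A minimal sketch of producing a one-file SVR4 archive with this package (names and contents are illustrative):

```go
package main

import (
	"bytes"
	"log"

	"github.com/cavaliergopher/cpio"
)

func main() {
	body := []byte("hello, cpio\n")
	var buf bytes.Buffer

	w := cpio.NewWriter(&buf)
	hdr := &cpio.Header{
		Name: "hello.txt",
		Mode: 0o644, // WriteHeader ORs in TypeReg for plain files
		Size: int64(len(body)),
	}
	if err := w.WriteHeader(hdr); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(body); err != nil {
		log.Fatal(err)
	}
	// Close appends the trailer entry and flushes any remaining padding.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("archive is %d bytes", buf.Len())
}
```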
+func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(p)) > w.nb { + p = p[0:w.nb] + overwrite = true + } + n, err = w.w.Write(p) + w.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + w.err = err + return +} + +// Close closes the CPIO archive by flushing the padding, and writing the +// footer. If the current file (from a prior call to WriteHeader) is not fully +// written, then this returns an error. +func (w *Writer) Close() error { + if w.err != nil || w.closed { + return w.err + } + w.err = w.WriteHeader(trailer) + if w.err != nil { + return w.err + } + w.Flush() + w.closed = true + return w.err +} diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index dffacff112..0728bfe18f 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -1,5 +1,6 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. +# File @generated by hack/generate-authors.sh. DO NOT EDIT. +# This file lists all contributors to the repository. +# See hack/generate-authors.sh to make modifications. Aanand Prasad Aaron Davidson @@ -7,16 +8,17 @@ Aaron Feng Aaron Hnatiw Aaron Huslage Aaron L. Xu -Aaron Lehmann +Aaron Lehmann Aaron Welch -Aaron.L.Xu Abel Muiño Abhijeet Kasurde -Abhinandan Prativadi +Abhinandan Prativadi Abhinav Ajgaonkar Abhishek Chanda Abhishek Sharma Abin Shahab +Abirdcfly +Ada Mancini Adam Avilla Adam Dobrawy Adam Eijdenberg @@ -26,6 +28,7 @@ Adam Mills Adam Pointer Adam Singer Adam Walz +Adam Williams Addam Hardy Aditi Rajagopal Aditya @@ -51,6 +54,7 @@ Akihiro Suda Akim Demaille Akira Koyasu Akshay Karle +Akshay Moghe Al Tobey alambike Alan Hoyle @@ -58,9 +62,11 @@ Alan Scherger Alan Thompson Albert Callarisa Albert Zhang -Albin Kerouanton +Albin Kerouanton +Alec Benson Alejandro González Hevia Aleksa Sarai +Aleksandr Chebotov Aleksandrs Fadins Alena Prokharchyk Alessandro Boch @@ -72,6 +78,7 @@ Alex Crawford Alex Ellis Alex Gaynor Alex Goodman +Alex Nordlund Alex Olshansky Alex Samorukov Alex Warhawk @@ -79,7 +86,8 @@ Alexander Artemenko Alexander Boyd Alexander Larsson Alexander Midlash -Alexander Morozov +Alexander Morozov +Alexander Polakov Alexander Shopov Alexandre Beslic Alexandre Garnier @@ -90,7 +98,8 @@ Alexei Margasov Alexey Guskov Alexey Kotlyarov Alexey Shamrin -Alexis THOMAS +Alexis Ries +Alexis Thomas Alfred Landrum Ali Dehghani Alicia Lauerman @@ -103,6 +112,7 @@ Alvin Deng Alvin Richards amangoel Amen Belayneh +Ameya Gawde Amir Goldstein Amit Bakshi Amit Krishnan @@ -126,6 +136,7 @@ Andreas Köhler Andreas Savvides Andreas Tiefenthaler Andrei Gherzan +Andrei Ushakov Andrei Vagin Andrew C. 
Bodine Andrew Clay Shafer @@ -135,6 +146,7 @@ Andrew Gerrand Andrew Guenther Andrew He Andrew Hsu +Andrew Kim Andrew Kuklewicz Andrew Macgregor Andrew Macpherson @@ -150,15 +162,17 @@ Andrey Kolomentsev Andrey Petrov Andrey Stolbovsky André Martins -andy Andy Chambers andy diller Andy Goldstein Andy Kipp +Andy Lindeman Andy Rothfusz Andy Smith Andy Wilson +Andy Zhang Anes Hasicic +Angel Velazquez Anil Belur Anil Madhavapeddy Ankit Jain @@ -179,20 +193,24 @@ Antonio Murdaca Antonis Kalipetis Antony Messerli Anuj Bahuguna +Anuj Varma Anusha Ragunathan +Anyu Wang apocas Arash Deshmeh ArikaChen -Arko Dasgupta +Arko Dasgupta Arnaud Lefebvre -Arnaud Porterie +Arnaud Porterie Arnaud Rebillout +Artem Khramov Arthur Barr Arthur Gautier Artur Meyster Arun Gupta Asad Saeeduddin Asbjørn Enge +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -200,17 +218,21 @@ Avi Miller Avi Vaid ayoshitake Azat Khuyiyakhmetov +Bao Yonglei Bardia Keyoumarsi Barnaby Gray Barry Allard Bartłomiej Piotrowski Bastiaan Bakker +Bastien Pascard bdevloed +Bearice Ren Ben Bonnefoy Ben Firshman Ben Golub Ben Gould Ben Hall +Ben Langfeld Ben Sargent Ben Severson Ben Toews @@ -218,6 +240,7 @@ Ben Wiklund Benjamin Atkin Benjamin Baker Benjamin Boudreau +Benjamin Böhmke Benjamin Yolken Benny Ng Benoit Chesneau @@ -231,12 +254,15 @@ Bhiraj Butala Bhumika Bayani Bilal Amarni Bill Wang +Billy Ridgway Bily Zhang Bin Liu Bingshen Wang +Bjorn Neergaard Blake Geno Boaz Shuster bobby abbott +Bojun Zhu Boqin Qin Boris Pruessmann Boshi Lian @@ -252,6 +278,7 @@ Brendan Dixon Brent Salisbury Brett Higgins Brett Kochendorfer +Brett Milford Brett Randall Brian (bex) Exelbierd Brian Bland @@ -282,6 +309,7 @@ Byung Kang Caleb Spare Calen Pennington Cameron Boehmer +Cameron Sparr Cameron Spear Campbell Allen Candid Dauth @@ -316,6 +344,7 @@ Charlie Drage Charlie Lewis Chase Bolt ChaYoung You +Chee Hau Lim Chen Chao Chen Chuanliang Chen Hanxiao @@ -325,6 +354,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chenyang Yan chenyuzhu Chetan Birajdar Chewey @@ -339,6 +369,7 @@ Chris Fordham Chris Gavin Chris Gibson Chris Khoo +Chris Kreussling (Flatbush Gardener) Chris McKinnel Chris McKinnel Chris Price @@ -351,6 +382,7 @@ Chris Telfer Chris Wahl Chris Weyl Chris White +Christian Becker Christian Berendt Christian Brauner Christian Böhme @@ -359,6 +391,7 @@ Christian Persson Christian Rotzoll Christian Simon Christian Stefanescu +Christoph Ziebuhr Christophe Mehay Christophe Troestler Christophe Vidal @@ -372,7 +405,9 @@ Christy Norman Chun Chen Ciro S. Costa Clayton Coleman +Clint Armstrong Clinton Kitson +clubby789 Cody Roseborough Coenraad Loubser Colin Dunklau @@ -383,19 +418,23 @@ Colin Walters Collin Guarino Colm Hally companycy +Conor Evans Corbin Coleman Corey Farrell Cory Forsyth +Cory Snider cressie176 -CrimsonGlory Cristian Ariza Cristian Staretu cristiano balducci Cristina Yenyxe Gonzalez Garcia Cruceru Calin-Cristian CUI Wei +cuishuang +Cuong Manh Le Cyprian Gracz Cyril F +Da McGrady Daan van Berkel Daehyeok Mun Dafydd Crosby @@ -413,6 +452,7 @@ Dan Hirsch Dan Keder Dan Levy Dan McPherson +Dan Plamadeala Dan Stine Dan Williams Dani Hodovic @@ -433,6 +473,7 @@ Daniel Mizyrycki Daniel Nephin Daniel Norberg Daniel Nordberg +Daniel P. 
Berrangé Daniel Robinson Daniel S Daniel Sweet @@ -441,6 +482,7 @@ Daniel Watkins Daniel X Moore Daniel YC Lin Daniel Zhang +Daniele Rondina Danny Berger Danny Milosavljevic Danny Yates @@ -456,6 +498,7 @@ Dave Henderson Dave MacDonald Dave Tucker David Anderson +David Bellotti David Calavera David Chung David Corking @@ -470,9 +513,11 @@ David Lawrence David Lechner David M. Karr David Mackey +David Manouchehri David Mat David Mcanulty David McKay +David O'Rourke David P Hilton David Pelaez David R. Jenni @@ -503,14 +548,14 @@ Dennis Docter Derek Derek Derek Ch -Derek McGowan +Derek McGowan Deric Crago Deshi Xiao -devmeyster Devon Estes Devvyn Murphy Dharmit Shah Dhawal Yogesh Bhanushali +Dhilip Kumars Diego Romero Diego Siqueira Dieter Reuter @@ -522,9 +567,11 @@ Dimitris Rozakis Dimitry Andric Dinesh Subhraveti Ding Fei +dingwei Diogo Monica DiuDiugirl Djibril Koné +Djordje Lukic dkumor Dmitri Logvinenko Dmitri Shuralyov @@ -536,6 +583,8 @@ Dmitry Shyshkin Dmitry Smirnov Dmitry V. Krivenok Dmitry Vorobev +Dmytro Iakovliev +docker-unir[bot] Dolph Mathews Dominic Tubach Dominic Yin @@ -569,8 +618,9 @@ Eivind Uggedal Elan Ruusamäe Elango Sivanandam Elena Morozova -Eli Uriegas +Eli Uriegas Elias Faxö +Elias Koromilas Elias Probst Elijah Zupancic eluck @@ -580,6 +630,7 @@ Emil Hernvall Emily Maier Emily Rose Emir Ozer +Eng Zer Jun Enguerran Eohyung Lee epeterso @@ -588,6 +639,7 @@ Eric Curtin Eric G. Noriega Eric Hanchrow Eric Lee +Eric Mountain Eric Myhre Eric Paris Eric Rafaloff @@ -597,17 +649,21 @@ Eric Soderstrom Eric Yang Eric-Olivier Lamey Erica Windisch +Erich Cordoba Erik Bray Erik Dubbelboer Erik Hollensbe Erik Inge Bolsø Erik Kristensen +Erik Sipsma Erik St. Martin Erik Weathers Erno Hopearuoho Erwin van der Koogh +Espen Suenson Ethan Bell Ethan Mosbaugh +Euan Harris Euan Kemp Eugen Krizo Eugene Yakubovich @@ -657,6 +713,7 @@ Fengtu Wang Ferenc Szabo Fernando Fero Volar +Feroz Salam Ferran Rodenas Filipe Brandenburger Filipe Oliveira @@ -673,6 +730,7 @@ Florin Patan fonglh Foysal Iqbal Francesc Campoy +Francesco Degrassi Francesco Mari Francis Chuang Francisco Carriedo @@ -681,18 +739,23 @@ Frank Groeneveld Frank Herrmann Frank Macreery Frank Rosquin -frankyang +Frank Yang Fred Lifton Frederick F. Kautz IV +Frederico F. de Oliveira Frederik Loeffert Frederik Nordahl Jul Sabroe Freek Kalter Frieder Bluemle +frobnicaty <92033765+frobnicaty@users.noreply.github.com> +Frédéric Dalleau Fu JinLin Félix Baylac-Jacqué Félix Cantournet Gabe Rosenhouse Gabor Nagy +Gabriel Goller +Gabriel L. Somlo Gabriel Linder Gabriel Monroy Gabriel Nicolas Avellaneda @@ -707,12 +770,14 @@ Gaurav Singh Gaël PORTAY Genki Takiuchi GennadySpb +Geoff Levand Geoffrey Bachelet Geon Kim George Kontridze George MacRorie George Xie Georgi Hristozov +Georgy Yakovlev Gereon Frey German DZ Gert van Valkenhoef @@ -724,6 +789,7 @@ Gildas Cuisinier Giovan Isa Musthofa gissehel Giuseppe Mazzotta +Giuseppe Scrivano Gleb Fotengauer-Malinovskiy Gleb M Borisov Glyn Normington @@ -746,6 +812,8 @@ Guilhem Lettron Guilherme Salgado Guillaume Dufour Guillaume J. Charmes +Gunadhya S. 
<6939749+gunadhya@users.noreply.github.com> +Guoqiang QI guoxiuyan Guri Gurjeet Singh @@ -755,12 +823,13 @@ gwx296173 Günter Zöchbauer Haichao Yang haikuoliu +haining.cao Hakan Özler Hamish Hutchings Hannes Ljungberg Hans Kristian Flaatten Hans Rødtang -Hao Shu Wei +Hao Shu Wei Hao Zhang <21521210@zju.edu.cn> Harald Albers Harald Niesche @@ -792,15 +861,16 @@ Hu Tao HuanHuan Ye Huanzhong Zhang Huayi Zhang +Hugo Barrera Hugo Duncan Hugo Marisco <0x6875676f@gmail.com> +Hui Kang Hunter Blanks huqun Huu Nguyen -hyeongkyu.lee +Hyeongkyu Lee Hyzhou Zhy Iago López Galeiras -Ian Babrou Ian Bishop Ian Bull Ian Calvert @@ -817,6 +887,7 @@ Igor Dolzhikov Igor Karpovich Iliana Weller Ilkka Laukkanen +Illo Abdulrahim Ilya Dmitrichenko Ilya Gusev Ilya Khlopotov @@ -847,7 +918,8 @@ Jaivish Kothari Jake Champlin Jake Moshenko Jake Sanders -jakedt +Jakub Drahos +Jakub Guzik James Allen James Carey James Carr @@ -859,11 +931,14 @@ James Lal James Mills James Nesbitt James Nugent +James Sanders James Turnbull James Watkins-Harvey Jamie Hannaford Jamshid Afshar +Jan Breig Jan Chren +Jan Götte Jan Keromnes Jan Koprowski Jan Pazdziora @@ -876,7 +951,6 @@ Januar Wayong Jared Biel Jared Hocutt Jaroslaw Zabiello -jaseg Jasmine Hegman Jason A. Donenfeld Jason Divock @@ -891,10 +965,11 @@ Jason Shepherd Jason Smith Jason Sommer Jason Stangroome +Javier Bassi jaxgeller -Jay Jay Jay Kamat +Jay Lim Jean Rouge Jean-Baptiste Barth Jean-Baptiste Dalido @@ -912,12 +987,14 @@ Jeff Minard Jeff Nickoloff Jeff Silberman Jeff Welch +Jeff Zvier Jeffrey Bolle Jeffrey Morgan Jeffrey van Gogh Jenny Gebske Jeremy Chambers Jeremy Grosser +Jeremy Huntwork Jeremy Price Jeremy Qian Jeremy Unruh @@ -933,13 +1010,16 @@ Ji.Zhilong Jian Liao Jian Zhang Jiang Jinyang +Jianyong Wu Jie Luo Jie Ma Jihyun Hwang Jilles Oldenbeuving Jim Alateras +Jim Carroll Jim Ehrismann Jim Galasyn +Jim Lin Jim Minter Jim Perrin Jimmy Cuadra @@ -951,6 +1031,7 @@ Jiri Appl Jiri Popelka Jiuyue Ma Jiří Župka +Joakim Roubert Joao Fernandes Joao Trindade Joe Beda @@ -1012,6 +1093,7 @@ Joost Cassee Jordan Arentsen Jordan Jennings Jordan Sissel +Jordi Massaguer Pla Jorge Marin Jorit Kleine-Möllhoff Jose Diaz-Gonzalez @@ -1044,12 +1126,15 @@ Julien Pervillé Julien Pivotto Julio Guerra Julio Montes +Jun Du Jun-Ru Chang +junxu Jussi Nummelin Justas Brazauskas Justen Martin Justin Cormack Justin Force +Justin Keller <85903732+jk-vb@users.noreply.github.com> Justin Menga Justin Plock Justin Simonelis @@ -1062,6 +1147,7 @@ Jörg Thalheim K. Heller Kai Blin Kai Qiang Wu (Kennan) +Kaijie Chen Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1082,6 +1168,7 @@ Kawsar Saiyeed Kay Yan kayrus Kazuhiro Sera +Kazuyoshi Kato Ke Li Ke Xu Kei Ohmura @@ -1096,6 +1183,7 @@ Kenjiro Nakayama Kent Johnson Kenta Tada Kevin "qwazerty" Houdebert +Kevin Alvarez Kevin Burke Kevin Clark Kevin Feyrer @@ -1122,20 +1210,22 @@ knappe Kohei Tsuruta Koichi Shiraishi Konrad Kleine +Konrad Ponichtera Konstantin Gribov Konstantin L Konstantin Pelykh +Kostadin Plachkov Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene Kristina Zabunova Krystian Wojcicki -Kun Zhang Kunal Kushwaha Kunal Tyagi Kyle Conroy Kyle Linden +Kyle Squizzato Kyle Wuolle kyu Lachlan Coote @@ -1151,20 +1241,25 @@ Lars R. 
Damerow Lars-Magnus Skog Laszlo Meszaros Laura Frank +Laurent Bernaille Laurent Erignoux Laurie Voss Leandro Siqueira +Lee Calcote Lee Chao <932819864@qq.com> Lee, Meng-Han -leeplay Lei Gong Lei Jitang +Leiiwang Len Weincier Lennie Leo Gallucci +Leonardo Nodari +Leonardo Taccari Leszek Kowalski Levi Blackstone Levi Gross +Levi Harrison Lewis Daly Lewis Marshall Lewis Peckover @@ -1173,11 +1268,12 @@ Liam Macgillavry Liana Lo Liang Mingqiang Liang-Chi Hsieh +liangwei Liao Qingwei Lifubang Lihua Tang Lily Guo -limsy +limeidan Lin Lu LingFaKe Linus Heckemann @@ -1207,6 +1303,7 @@ Lucas Chi Lucas Molas Lucas Silvestre Luciano Mores +Luis Henrique Mulinari Luis Martínez de Bartolomé Izquierdo Luiz Svoboda Lukas Heeren @@ -1222,7 +1319,7 @@ Ma Shimiao Mabin Madhan Raj Mookkandy Madhav Puri -Madhu Venugopal +Madhu Venugopal Mageee Mahesh Tiyyagura malnick @@ -1255,12 +1352,14 @@ Marius Gundersen Marius Sturm Marius Voila Mark Allen +Mark Feit Mark Jeromin Mark McGranaghan Mark McKinstry Mark Milstein Mark Oates Mark Parker +Mark Vainomaa Mark West Markan Patel Marko Mikulicic @@ -1269,11 +1368,14 @@ Markus Fix Markus Kortlang Martijn Dwars Martijn van Oosterhout +Martin Braun +Martin Dojcak Martin Honermeyer Martin Kelly Martin Mosegaard Amdisen Martin Muzatko Martin Redmond +Maru Newby Mary Anthony Masahito Zembutsu Masato Ohba @@ -1284,13 +1386,16 @@ Mathias Monnerville Mathieu Champlon Mathieu Le Marec - Pasquet Mathieu Parent +Mathieu Paturel Matt Apperson Matt Bachmann +Matt Bajor Matt Bentley Matt Haggard Matt Hoyle Matt McCormick Matt Moore +Matt Morrison <3maven@gmail.com> Matt Richardson Matt Rickard Matt Robenolt @@ -1305,12 +1410,14 @@ Matthew Riley Matthias Klumpp Matthias Kühnle Matthias Rampke +Matthieu Fronton Matthieu Hauglustaine Mattias Jernberg Mauricio Garavaglia mauriyouth Max Harmathy Max Shytikov +Max Timchenko Maxim Fedchyshyn Maxim Ivanov Maxim Kulkin @@ -1324,14 +1431,16 @@ Megan Kostick Mehul Kar Mei ChunTao Mengdi Gao +Menghui Chen Mert Yazıcıoğlu mgniu Micah Zoltu Michael A. 
Smith +Michael Beskin Michael Bridgen Michael Brown Michael Chiang -Michael Crosby +Michael Crosby Michael Currie Michael Friis Michael Gorsuch @@ -1340,6 +1449,7 @@ Michael Holzheu Michael Hudson-Doyle Michael Huettermann Michael Irwin +Michael Kuehn Michael Käufl Michael Neale Michael Nussbaum @@ -1349,23 +1459,29 @@ Michael Spetsiotis Michael Stapelberg Michael Steinert Michael Thies +Michael Weidmann Michael West Michael Zhao Michal Fojtik Michal Gebauer Michal Jemala +Michal Kostrzewa Michal Minář +Michal Rostecki Michal Wieczorek Michaël Pailloncy Michał Czeraszkiewicz Michał Gryko +Michał Kosek Michiel de Jong Mickaël Fortunato Mickaël Remars Miguel Angel Fernández Miguel Morales +Miguel Perez Mihai Borobocea Mihuleacc Sergiu +Mikael Davranche Mike Brown Mike Bush Mike Casas @@ -1384,6 +1500,7 @@ Mike Snitzer mikelinjie <294893458@qq.com> Mikhail Sobolev Miklos Szegedi +Milas Bowman Milind Chawre Miloslav Trmač mingqing @@ -1392,7 +1509,7 @@ Misty Stanley-Jones Mitch Capper Mizuki Urushida mlarcher -Mohammad Banikazemi +Mohammad Banikazemi Mohammad Nasirifar Mohammed Aaqib Ansari Mohit Soni @@ -1406,6 +1523,7 @@ Moysés Borges mrfly Mrunal Patel Muayyad Alsadi +Muhammad Zohaib Aslam Mustafa Akın Muthukumar R Máximo Cuadros @@ -1422,6 +1540,8 @@ Natasha Jarus Nate Brennand Nate Eagleson Nate Jones +Nathan Carlson +Nathan Herald Nathan Hsieh Nathan Kleyn Nathan LeClaire @@ -1445,6 +1565,7 @@ Nick Payne Nick Russo Nick Stenning Nick Stinemates +Nick Wood NickrenREN Nicola Kabar Nicolas Borboën @@ -1455,6 +1576,7 @@ Nicolas Kaiser Nicolas Sterchele Nicolas V Castet Nicolás Hock Isaza +Niel Drummond Nigel Poulton Nik Nyby Nikhil Chawla @@ -1472,6 +1594,7 @@ noducks Nolan Darilek Noriki Nakamura nponeccop +Nurahmadie Nuutti Kotivuori nzwsch O.S. Tezer @@ -1489,7 +1612,9 @@ Olle Jonsson Olli Janatuinen Olly Pomeroy Omri Shiv +Onur Filiz Oriol Francès +Oscar Bonilla <6f6231@gmail.com> Oskar Niburski Otto Kekäläinen Ouyang Liduo @@ -1502,10 +1627,12 @@ Pascal Borreli Pascal Hartig Patrick Böänziger Patrick Devine +Patrick Haas Patrick Hemmer Patrick Stapleton Patrik Cyvoct pattichen +Paul "TBBle" Hampson Paul paul Paul Annesley @@ -1520,6 +1647,7 @@ Paul Liljenberg Paul Morie Paul Nasrat Paul Weaver +Paulo Gomes Paulo Ribeiro Pavel Lobashov Pavel Matěja @@ -1530,6 +1658,7 @@ Pavel Tikhomirov Pavlos Ratis Pavol Vargovcik Pawel Konczalski +Paweł Gronowski Peeyush Gupta Peggy Li Pei Su @@ -1537,6 +1666,7 @@ Peng Tao Penghan Wang Per Weijnitz perhapszzy@sina.com +Pete Woods Peter Bourgon Peter Braden Peter Bücker @@ -1552,8 +1682,10 @@ Peter Salvatore Peter Volpe Peter Waller Petr Švihlík +Petros Angelatos Phil -Phil Estes +Phil Estes +Phil Sphicas Phil Spitler Philip Alexander Etling Philip Monroe @@ -1570,21 +1702,25 @@ Pierre Dal-Pra Pierre Wacrenier Pierre-Alain RIVIERE Piotr Bogdan -pixelistik +Piotr Karbowski Porjo Poul Kjeldager Sørensen Pradeep Chhetri Pradip Dhara +Pradipta Kr. 
Banerjee Prasanna Gautam Pratik Karki Prayag Verma Priya Wadhwa Projjol Banerji Przemek Hejman +Puneet Pruthi Pure White pysqz Qiang Huang +Qin TianHuan Qinglan Peng +Quan Tian qudongfang Quentin Brossard Quentin Perez @@ -1607,6 +1743,7 @@ Ramon van Alteren RaviTeja Pothana Ray Tsang ReadmeCritic +realityone Recursive Madman Reficul Regan McCooey @@ -1617,9 +1754,9 @@ Renaud Gaubert Rhys Hiltner Ri Xu Ricardo N Feliciano +Rich Horwood Rich Moyse Rich Seymour -Richard Richard Burnison Richard Harvey Richard Mathie @@ -1634,12 +1771,14 @@ Riku Voipio Riley Guerin Ritesh H Shukla Riyaz Faizullabhoy +Rob Cowsill <42620235+rcowsill@users.noreply.github.com> Rob Gulewich Rob Vesse Robert Bachmann Robert Bittle Robert Obryk Robert Schneider +Robert Shade Robert Stern Robert Terhaar Robert Wallis @@ -1652,6 +1791,7 @@ Robin Speekenbrink Robin Thoni robpc Rodolfo Carvalho +Rodrigo Campos Rodrigo Vaz Roel Van Nyen Roger Peppe @@ -1666,11 +1806,14 @@ Roma Sokolov Roman Dudin Roman Mazur Roman Strashkin +Roman Volosatovs +Roman Zabaluev Ron Smits Ron Williams Rong Gao Rong Zhang Rongxiang Song +Rony Weng root root root @@ -1690,13 +1833,16 @@ Russ Magee Ryan Abrams Ryan Anderson Ryan Aslett +Ryan Barry Ryan Belgrave +Ryan Campbell Ryan Detzel Ryan Fowler Ryan Liu Ryan McLaughlin Ryan O'Donnell Ryan Seto +Ryan Shea Ryan Simmen Ryan Stelly Ryan Thomas @@ -1706,9 +1852,9 @@ Ryan Zhang ryancooper7 RyanDeng Ryo Nakao +Ryoga Saito Rémy Greinhofer s. rannou -s00318865 Sabin Basyal Sachin Joshi Sagar Hani @@ -1728,8 +1874,9 @@ Sambuddha Basu Sami Wagiaalla Samuel Andaya Samuel Dion-Girardeau -Samuel Karp +Samuel Karp Samuel PHAN +sanchayanghosh Sandeep Bansal Sankar சங்கர் Sanket Saurav @@ -1745,6 +1892,7 @@ Satoshi Tagomori Scott Bessler Scott Collier Scott Johnston +Scott Percival Scott Stamp Scott Walls sdreyesg @@ -1757,6 +1905,9 @@ Sean P. Kane Sean Rodman Sebastiaan van Steenis Sebastiaan van Stijn +Sebastian Höffner +Sebastian Radloff +Sebastien Goasguen Senthil Kumar Selvaraj Senthil Kumaran SeongJae Park @@ -1776,12 +1927,15 @@ shaunol Shawn Landden Shawn Siefkas shawnhe +Shayan Pooya Shayne Wang Shekhar Gulati Sheng Yang Shengbo Song +Shengjing Zhu Shev Yan Shih-Yuan Lee +Shihao Xia Shijiang Wei Shijun Qin Shishir Mahajan @@ -1790,14 +1944,13 @@ Shourya Sarcar Shu-Wai Chow shuai-z Shukui Yang -Shuwei Hao Sian Lerk Lau +Siarhei Rasiukevich Sidhartha Mani sidharthamani Silas Sewell Silvan Jegen Simão Reis -Simei He Simon Barendse Simon Eskildsen Simon Ferquel @@ -1808,13 +1961,16 @@ Simon Vikstrom Sindhu S Sjoerd Langkemper skanehira +Smark Meng Solganik Alexander Solomon Hykes Song Gao Soshi Katsuta +Sotiris Salloumis Soulou Spencer Brown Spencer Smith +Spike Curtis Sridatta Thatipamala Sridhar Ratnakumar Srini Brahmaroutu @@ -1830,6 +1986,7 @@ Stefan S. Stefan Scherer Stefan Staudenmeyer Stefan Weil +Steffen Butzer Stephan Spindler Stephen Benjamin Stephen Crosby @@ -1848,7 +2005,9 @@ Steven Iveson Steven Merrill Steven Richards Steven Taylor +Stéphane Este-Gracias Stig Larsson +Su Wang Subhajit Ghosh Sujith Haridasan Sun Gengze <690388648@qq.com> @@ -1858,15 +2017,16 @@ Sunny Gogoi Suryakumar Sudar Sven Dowideit Swapnil Daingade -Sylvain Baubeau +Sylvain Baubeau Sylvain Bellemare Sébastien Sébastien HOUZÉ Sébastien Luttringer Sébastien Stormacq +Sören Tempel Tabakhase Tadej Janež -TAGOMORI Satoshi +Takuto Sato tang0th Tangi Colin Tatsuki Sugiura @@ -1877,18 +2037,21 @@ Ted M. 
Young Tehmasp Chaudhri Tejaswini Duggaraju Tejesh Mehta +Terry Chu terryding77 <550147740@qq.com> -tgic Thatcher Peskens theadactyl Thell 'Bo' Fowler Thermionix +Thiago Alves Silva Thijs Terlouw Thomas Bikeev Thomas Frössman Thomas Gazagnaire +Thomas Graf Thomas Grainger Thomas Hansen +Thomas Ledos Thomas Leonard Thomas Léveil Thomas Orozco @@ -1899,11 +2062,13 @@ Thomas Swift Thomas Tanaka Thomas Texier Ti Zhou +Tiago Seabra Tianon Gravi Tianyi Wang Tibor Vass Tiffany Jernigan Tiffany Low +Till Claassen Till Wegmüller Tim Tim Bart @@ -1915,11 +2080,14 @@ Tim Potter Tim Ruffles Tim Smith Tim Terhorst +Tim Wagner Tim Wang Tim Waugh Tim Wraight Tim Zju <21651152@zju.edu.cn> +timchenxiaoyu <837829664@qq.com> timfeirg +Timo Rothenpieler Timothy Hobbs tjwebb123 tobe @@ -1928,6 +2096,7 @@ Tobias Bradtke Tobias Gesellchen Tobias Klauser Tobias Munk +Tobias Pfandzelter Tobias Schmidt Tobias Schwab Todd Crane @@ -1941,25 +2110,33 @@ Tom Fotherby Tom Howe Tom Hulihan Tom Maaswinkel +Tom Parker Tom Sweeney Tom Wilkie Tom X. Tobin +Tom Zhao +Tomas Janousek +Tomas Kral Tomas Tomecek Tomasz Kopczynski Tomasz Lipinski Tomasz Nurkiewicz +Tomek Mańko Tommaso Visconti +Tomoya Tabuchi Tomáš Hrčka +tonic Tonny Xu Tony Abboud Tony Daws Tony Miller toogley Torstein Husebø +Toshiaki Makita Tõnis Tiigi Trace Andreason tracylihui <793912329@qq.com> -Trapier Marshall +Trapier Marshall Travis Cline Travis Thieman Trent Ogren @@ -1969,6 +2146,8 @@ Trevor Sullivan Trishna Guha Tristan Carel Troy Denton +Tudor Brindus +Ty Alexander Tycho Andersen Tyler Brock Tyler Brown @@ -1979,6 +2158,7 @@ Umesh Yadav Utz Bacher vagrant Vaidas Jablonskis +Valentin Kulesh vanderliang Velko Ivanov Veres Lajos @@ -1992,12 +2172,13 @@ Victor Palma Victor Vieux Victoria Bialas Vijaya Kumar K +Vikas Choudhary Vikram bir Singh Viktor Stanchev Viktor Vojnovski VinayRaghavanKS Vincent Batts -Vincent Bernat +Vincent Bernat Vincent Boulineau Vincent Demeester Vincent Giersch @@ -2017,9 +2198,9 @@ Vladimir Pouzanov Vladimir Rutsky Vladimir Varankin VladimirAus +Vladislav Kolesnikov Vlastimil Zeman Vojtech Vitek (V-Teq) -waitingkuo Walter Leibbrandt Walter Stanish Wang Chao @@ -2034,6 +2215,7 @@ wanghuaiqing Ward Vandewege WarheadsSE Wassim Dhif +Wataru Ishida Wayne Chang Wayne Song Weerasak Chongnguluam @@ -2048,7 +2230,6 @@ Wendel Fleming Wenjun Tang Wenkai Yin wenlxie -Wentao Zhang Wenxuan Zhao Wenyu You <21551128@zju.edu.cn> Wenzhi Liang @@ -2068,16 +2249,22 @@ William Thurston Wilson Júnior Wing-Kam Wong WiseTrem +Wolfgang Nagele Wolfgang Powisch Wonjun Kim +WuLonghui xamyzhao +Xia Wu Xian Chaobo Xianglin Gao +Xianjie Xianlu Bird Xiao YongBiao +Xiao Zhang XiaoBing Jiang Xiaodong Liu Xiaodong Zhang +Xiaohua Ding Xiaoxi He Xiaoxu Chen Xiaoyu Zhang @@ -2092,12 +2279,16 @@ Xuecong Liao xuzhaokui Yadnyawalkya Tale Yahya +yalpul YAMADA Tsuyoshi Yamasaki Masahide Yan Feng +Yan Zhu Yang Bai +Yang Li Yang Pengfei yangchenliang +Yann Autissier Yanqiang Miao Yao Zaiyong Yash Murty @@ -2117,6 +2308,7 @@ Yosef Fertel You-Sheng Yang (楊有勝) youcai Youcef YEKHLEF +Youfu Zhang Yu Changchun Yu Chengxia Yu Peng @@ -2124,14 +2316,18 @@ Yu-Ju Hong Yuan Sun Yuanhong Peng Yue Zhang +Yufei Xiong Yuhao Fang Yuichiro Kaneko +YujiOshima Yunxiang Huang Yurii Rashkovskii Yusuf Tarık Günaydın +Yves Blusseau <90z7oey02@sneakemail.com> Yves Junqueira Zac Dover Zach Borboa +Zach Gershman Zachary Jaffee Zain Memon Zaiste! 
@@ -2147,6 +2343,7 @@ Zhenan Ye <21551168@zju.edu.cn> zhenghenghuo Zhenhai Gao Zhenkun Bi +ZhiPeng Lu zhipengzuo Zhou Hao Zhoulin Xie @@ -2164,7 +2361,6 @@ Zou Yu zqh Zuhayr Elahi Zunayed Ali -Álex González Álvaro Lázaro Átila Camurça Alves 尹吉峰 @@ -2173,3 +2369,4 @@ Zunayed Ali 慕陶 搏通 黄艳红00139573 +정재영 diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 1565e2af64..bee9b875a8 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion = "1.41" + DefaultVersion = "1.42" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index c24f57bc9a..afe7a8c371 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.41" +basePath: "/v1.42" info: title: "Docker Engine API" - version: "1.41" + version: "1.42" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.41) is used. - For example, calling `/info` is the same as calling `/v1.41/info`. Using the + If you omit the version-prefix, the current version of the API (v1.42) is used. + For example, calling `/info` is the same as calling `/v1.42/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -202,24 +202,74 @@ definitions: MountPoint: type: "object" - description: "A mount point inside a container" + description: | + MountPoint represents a mount point configuration inside the container. + This is used for reporting the mountpoints in use by a container. properties: Type: + description: | + The mount type: + + - `bind` a mount of a file or directory from the host into the container. + - `volume` a docker volume with the given `Name`. + - `tmpfs` a `tmpfs`. + - `npipe` a named pipe from the host into the container. + - `cluster` a Swarm cluster volume type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + - "cluster" + example: "volume" Name: + description: | + Name is the name reference to the underlying data defined by `Source` + e.g., the volume name. type: "string" + example: "myvolume" Source: + description: | + Source location of the mount. + + For volumes, this contains the storage location of the volume (within + `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + the source (host) part of the bind-mount. For `tmpfs` mount points, this + field is empty. type: "string" + example: "/var/lib/docker/volumes/myvolume/_data" Destination: + description: | + Destination is the path relative to the container root (`/`) where + the `Source` is mounted inside the container. type: "string" + example: "/usr/share/nginx/html/" Driver: + description: | + Driver is the volume driver used to create the volume (if it is a volume). 
type: "string" + example: "local" Mode: + description: | + Mode is a comma separated list of options supplied by the user when + creating the bind/volume mount. + + The default is platform-specific (`"z"` on Linux, empty on Windows). type: "string" + example: "z" RW: + description: | + Whether the mount is mounted writable (read-write). type: "boolean" + example: true Propagation: + description: | + Propagation describes how mounts are propagated from the host into the + mount point, and vice-versa. Refer to the [Linux kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt) + for details. This field is not used on Windows. type: "string" + example: "" DeviceMapping: type: "object" @@ -302,12 +352,14 @@ definitions: - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + - `cluster` a Swarm cluster volume type: "string" enum: - "bind" - "volume" - "tmpfs" - "npipe" + - "cluster" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -332,6 +384,10 @@ definitions: description: "Disable recursive bind mount." type: "boolean" default: false + CreateMountpoint: + description: "Create mount point on host if missing" + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -382,11 +438,13 @@ definitions: type: "string" description: | - Empty string means not to restart + - `no` Do not automatically restart - `always` Always restart - `unless-stopped` Restart always except when the user has manually stopped the container - `on-failure` Restart only when the container exit code is non-zero enum: - "" + - "no" - "always" - "unless-stopped" - "on-failure" @@ -527,19 +585,13 @@ definitions: type: "array" items: $ref: "#/definitions/DeviceRequest" - KernelMemory: + KernelMemoryTCP: description: | - Kernel memory limit in bytes. - -
+ Hard limit for kernel TCP buffer memory (in bytes). Depending on the + OCI runtime in use, this option may be ignored. It is no longer supported + by the default (runc) runtime. - > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated - > `kmem.limit_in_bytes`. - type: "integer" - format: "int64" - example: 209715200 - KernelMemoryTCP: - description: "Hard limit for kernel TCP buffer memory (in bytes)." + This field is omitted when empty. type: "integer" format: "int64" MemoryReservation: @@ -723,11 +775,13 @@ definitions: The time to wait between checks in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Timeout: description: | The time to wait before considering the check to have hung. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Retries: description: | The number of consecutive failures needed to consider a container as @@ -739,11 +793,13 @@ definitions: health-retries countdown in nanoseconds. It should be 0 or at least 1000000 (1 ms). 0 means inherit. type: "integer" + format: "int64" Health: description: | Health stores information about the container's healthcheck results. type: "object" + x-nullable: true properties: Status: description: | @@ -769,13 +825,13 @@ definitions: description: | Log contains the last few results (oldest first) items: - x-nullable: true $ref: "#/definitions/HealthcheckResult" HealthcheckResult: description: | HealthcheckResult stores information about a single run of a healthcheck probe type: "object" + x-nullable: true properties: Start: description: | @@ -910,6 +966,16 @@ definitions: type: "array" items: $ref: "#/definitions/Mount" + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 # Applicable to UNIX platforms CapAdd: @@ -1024,8 +1090,9 @@ definitions: description: "Mount the container's root filesystem as read only." SecurityOpt: type: "array" - description: "A list of string values to customize labels for MLS - systems, such as SELinux." + description: | + A list of string values to customize labels for MLS systems, such + as SELinux. items: type: "string" StorageOpt: @@ -1073,15 +1140,6 @@ definitions: type: "string" description: "Runtime to use with this container." # Applicable to Windows - ConsoleSize: - type: "array" - description: | - Initial console size, as an `[height, width]` array. (Windows only) - minItems: 2 - maxItems: 2 - items: - type: "integer" - minimum: 0 Isolation: type: "string" description: | @@ -1106,14 +1164,25 @@ definitions: type: "string" ContainerConfig: - description: "Configuration for a container that is portable between hosts" + description: | + Configuration for a container that is portable between hosts. + + When used as `ContainerConfig` field in an image, `ContainerConfig` is an + optional field containing the configuration of the container that was last + committed when creating the image. + + Previous versions of Docker builder used this field to store build cache, + and it is not in active use anymore. type: "object" properties: Hostname: - description: "The hostname to use for the container, as a valid RFC 1123 hostname." + description: | + The hostname to use for the container, as a valid RFC 1123 hostname. type: "string" + example: "439f4e91bd1d" Domainname: - description: "The domain name to use for the container." 
+ description: | + The domain name to use for the container. type: "string" User: description: "The user that commands are run as inside the container." @@ -1136,11 +1205,16 @@ definitions: `{"/": {}}` type: "object" + x-nullable: true additionalProperties: type: "object" enum: - {} default: {} + example: { + "80/tcp": {}, + "443/tcp": {} + } Tty: description: | Attach standard streams to a TTY, including `stdin` if it is not closed. @@ -1162,21 +1236,29 @@ definitions: type: "array" items: type: "string" + example: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" Cmd: description: | Command to run specified as a string or an array of strings. type: "array" items: type: "string" + example: ["/bin/sh"] Healthcheck: $ref: "#/definitions/HealthConfig" ArgsEscaped: description: "Command is already escaped (Windows only)" type: "boolean" + default: false + example: false + x-nullable: true Image: description: | - The name of the image to use when creating the container/ + The name (or reference) of the image to use when creating the container, + or which was used when the container was created. type: "string" + example: "example-image:1.0" Volumes: description: | An object mapping mount point paths inside the container to empty @@ -1190,6 +1272,7 @@ definitions: WorkingDir: description: "The working directory for commands to run in." type: "string" + example: "/public/" Entrypoint: description: | The entry point for the container as a string or an array of strings. @@ -1200,38 +1283,50 @@ definitions: type: "array" items: type: "string" + example: [] NetworkDisabled: description: "Disable networking for the container." type: "boolean" + x-nullable: true MacAddress: description: "MAC address of the container." type: "string" + x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. type: "array" + x-nullable: true items: type: "string" + example: [] Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" StopSignal: description: | Signal to stop a container as a string or unsigned integer. type: "string" - default: "SIGTERM" + example: "SIGTERM" + x-nullable: true StopTimeout: description: "Timeout to stop a container in seconds." type: "integer" default: 10 + x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. type: "array" + x-nullable: true items: type: "string" + example: ["/bin/sh", "-c"] NetworkingConfig: description: | @@ -1274,7 +1369,7 @@ definitions: type: "object" properties: Bridge: - description: Name of the network'a bridge (for example, `docker0`). + description: Name of the network's bridge (for example, `docker0`). type: "string" example: "docker0" SandboxID: @@ -1488,107 +1583,215 @@ definitions: example: "4443" GraphDriverData: - description: "Information about a container's graph driver." + description: | + Information about the storage driver used to store the container's and + image's filesystem. type: "object" required: [Name, Data] properties: Name: + description: "Name of the storage driver." type: "string" x-nullable: false + example: "overlay2" Data: + description: | + Low-level storage metadata, provided as key/value pairs. + + This information is driver-specific, and depends on the storage-driver + in use, and should be used for informational purposes only. 
type: "object" x-nullable: false additionalProperties: type: "string" + example: { + "MergedDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/merged", + "UpperDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/diff", + "WorkDir": "/var/lib/docker/overlay2/ef749362d13333e65fc95c572eb525abbe0052e16e086cb64bc3b98ae9aa6d74/work" + } - Image: + ImageInspect: + description: | + Information about an image in the local image cache. type: "object" - required: - - Id - - Parent - - Comment - - Created - - Container - - DockerVersion - - Author - - Architecture - - Os - - Size - - VirtualSize - - GraphDriver - - RootFS properties: Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. type: "string" x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. + + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. type: "array" items: type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. type: "array" items: type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Parent: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. type: "string" x-nullable: false + example: "" Comment: + description: | + Optional message that was set when committing or importing the image. type: "string" x-nullable: false + example: "" Created: + description: | + Date and time at which the image was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" x-nullable: false + example: "2022-02-04T21:20:12.497794809Z" Container: + description: | + The ID of the container that was used to create the image. + + Depending on how the image was created, this field may be empty. type: "string" x-nullable: false + example: "65974bc86f1770ae4bff79f651ebdbce166ae9aada632ee3fa9af3a264911735" ContainerConfig: $ref: "#/definitions/ContainerConfig" DockerVersion: + description: | + The version of Docker that was used to build the image. + + Depending on how the image was created, this field may be empty. 
type: "string" x-nullable: false + example: "20.10.7" Author: + description: | + Name of the author that was specified when committing the image, or as + specified through MAINTAINER (deprecated) in the Dockerfile. type: "string" x-nullable: false + example: "" Config: $ref: "#/definitions/ContainerConfig" Architecture: + description: | + Hardware CPU architecture that the image runs on. type: "string" x-nullable: false + example: "arm" + Variant: + description: | + CPU architecture variant (presently ARM-only). + type: "string" + x-nullable: true + example: "v7" Os: + description: | + Operating System the image is built to run on. type: "string" x-nullable: false + example: "linux" OsVersion: + description: | + Operating System version the image is built to run on (especially + for Windows). type: "string" + example: "" + x-nullable: true Size: + description: | + Total size of the image including all layers it is composed of. type: "integer" format: "int64" x-nullable: false + example: 1239828 VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. type: "integer" format: "int64" x-nullable: false + example: 1239828 GraphDriver: $ref: "#/definitions/GraphDriverData" RootFS: + description: | + Information about the image's RootFS, including the layer IDs. type: "object" required: [Type] properties: Type: type: "string" x-nullable: false + example: "layers" Layers: type: "array" items: type: "string" - BaseLayer: - type: "string" + example: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" Metadata: + description: | + Additional metadata of the image in the local cache. This information + is local to the daemon, and not part of the image itself. type: "object" properties: LastTagTime: + description: | + Date and time at which the image was last tagged in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + + This information is only available if the image was tagged locally, + and omitted otherwise. type: "string" format: "dateTime" - + example: "2022-02-28T14:40:02.623929178Z" + x-nullable: true ImageSummary: type: "object" required: @@ -1604,41 +1807,120 @@ definitions: - Containers properties: Id: + description: | + ID is the content-addressable ID of an image. + + This identifier is a content-addressable digest calculated from the + image's configuration (which includes the digests of layers used by + the image). + + Note that this digest differs from the `RepoDigests` below, which + holds digests of image manifests that reference the image. type: "string" x-nullable: false + example: "sha256:ec3f0931a6e6b6855d76b2d7b0be30e81860baccd891b2e243280bf1cd8ad710" ParentId: + description: | + ID of the parent image. + + Depending on how the image was created, this field may be empty and + is only set for images that were built/created locally. This field + is empty if the image was pulled from an image registry. type: "string" x-nullable: false + example: "" RepoTags: + description: | + List of image names/tags in the local image cache that reference this + image. 
+ + Multiple image tags can refer to the same image, and this list may be + empty if no tags reference the image, in which case the image is + "untagged", in which case it can still be referenced by its ID. type: "array" x-nullable: false items: type: "string" + example: + - "example:1.0" + - "example:latest" + - "example:stable" + - "internal.registry.example.com:5000/example:1.0" RepoDigests: + description: | + List of content-addressable digests of locally available image manifests + that the image is referenced from. Multiple manifests can refer to the + same image. + + These digests are usually only available if the image was either pulled + from a registry, or if the image was pushed to a registry, which is when + the manifest is generated and its digest calculated. type: "array" x-nullable: false items: type: "string" + example: + - "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb" + - "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578" Created: + description: | + Date and time at which the image was created as a Unix timestamp + (number of seconds sinds EPOCH). type: "integer" x-nullable: false + example: "1644009612" Size: + description: | + Total size of the image including all layers it is composed of. type: "integer" + format: "int64" x-nullable: false + example: 172064416 SharedSize: + description: | + Total size of image layers that are shared between this image and other + images. + + This size is not calculated by default. `-1` indicates that the value + has not been set / calculated. type: "integer" + format: "int64" x-nullable: false + example: 1239828 VirtualSize: + description: | + Total size of the image including all layers it is composed of. + + In versions of Docker before v1.10, this field was calculated from + the image itself and all of its parent images. Docker v1.10 and up + store images self-contained, and no longer use a parent-chain, making + this field an equivalent of the Size field. + + This field is kept for backward compatibility, but may be removed in + a future version of the API. type: "integer" + format: "int64" x-nullable: false + example: 172064416 Labels: + description: "User-defined key/value metadata." type: "object" x-nullable: false additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Containers: + description: | + Number of containers using this image. Includes both stopped and running + containers. + + This size is not calculated by default, and depends on which API endpoint + is used. `-1` indicates that the value has not been set / calculated. x-nullable: false type: "integer" + example: 2 AuthConfig: type: "object" @@ -1680,18 +1962,22 @@ definitions: type: "string" description: "Name of the volume." x-nullable: false + example: "tardis" Driver: type: "string" description: "Name of the volume driver used by the volume." x-nullable: false + example: "custom" Mountpoint: type: "string" description: "Mount path of the volume on the host." x-nullable: false + example: "/var/lib/docker/volumes/tardis" CreatedAt: type: "string" format: "dateTime" description: "Date/Time the volume was created." + example: "2016-06-07T20:31:11.853781916Z" Status: type: "object" description: | @@ -1703,12 +1989,17 @@ definitions: does not support this feature. 
additionalProperties: type: "object" + example: + hello: "world" Labels: type: "object" description: "User-defined key/value metadata." x-nullable: false additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Scope: type: "string" description: | @@ -1717,15 +2008,23 @@ definitions: default: "local" x-nullable: false enum: ["local", "global"] + example: "local" + ClusterVolume: + $ref: "#/definitions/ClusterVolume" Options: type: "object" description: | The driver specific options used when creating the volume. additionalProperties: type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" UsageData: type: "object" x-nullable: true + x-go-name: "UsageData" required: [Size, RefCount] description: | Usage details about the volume. This information is used by the @@ -1733,6 +2032,7 @@ definitions: properties: Size: type: "integer" + format: "int64" default: -1 description: | Amount of disk space used by the volume (in bytes). This information @@ -1742,23 +2042,71 @@ definitions: x-nullable: false RefCount: type: "integer" + format: "int64" default: -1 description: | The number of containers referencing this volume. This field is set to `-1` if the reference-count is not available. x-nullable: false - example: - Name: "tardis" - Driver: "custom" - Mountpoint: "/var/lib/docker/volumes/tardis" - Status: - hello: "world" + VolumeCreateOptions: + description: "Volume configuration" + type: "object" + title: "VolumeConfig" + x-go-name: "CreateOptions" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + example: "tardis" + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + example: "custom" + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + example: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - CreatedAt: "2016-06-07T20:31:11.853781916Z" + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + ClusterVolumeSpec: + $ref: "#/definitions/ClusterVolumeSpec" + + VolumeListResponse: + type: "object" + title: "VolumeListResponse" + x-go-name: "ListResponse" + description: "Volume list response" + properties: + Volumes: + type: "array" + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + example: [] Network: type: "object" @@ -1846,15 +2194,27 @@ definitions: ``` type: "array" items: - type: "object" - additionalProperties: - type: "string" + $ref: "#/definitions/IPAMConfig" Options: description: "Driver-specific options, specified as a map." 
type: "object" additionalProperties: type: "string" + IPAMConfig: + type: "object" + properties: + Subnet: + type: "string" + IPRange: + type: "string" + Gateway: + type: "string" + AuxiliaryAddresses: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: type: "object" properties: @@ -1902,8 +2262,19 @@ definitions: Parent: description: | ID of the parent build cache record. + + > **Deprecated**: This field is deprecated, and omitted if empty. type: "string" - example: "hw53o5aio51xtltp5xjp8v7fx" + x-nullable: true + example: "" + Parents: + description: | + List of parent build cache record IDs. + type: "array" + items: + type: "string" + x-nullable: true + example: ["hw53o5aio51xtltp5xjp8v7fx"] Type: type: "string" description: | @@ -1972,6 +2343,8 @@ definitions: type: "string" error: type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" status: type: "string" progress: @@ -2218,6 +2591,25 @@ definitions: type: "string" x-nullable: false + PluginPrivilege: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + x-go-name: "PluginPrivilege" + properties: + Name: + type: "string" + example: "network" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - "host" + Plugin: description: "A plugin for the Engine API" type: "object" @@ -3000,19 +3392,7 @@ definitions: PluginPrivilege: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" ContainerSpec: type: "object" description: | @@ -3661,6 +4041,7 @@ definitions: ServiceSpec: description: "User modifiable configuration for a service." + type: object properties: Name: description: "Name of the service." 
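On the Docker side, the bump of DefaultVersion and basePath to 1.42 together with the new VolumeCreateOptions schema (note its `x-go-name: "CreateOptions"`) surface in the Go client. A hedged sketch, assuming a moby vendoring level that exposes the renamed volume.CreateOptions type, as the annotations in this diff suggest; version negotiation keeps older daemons reachable despite the 1.42 client default:

```go
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
)

func main() {
	// Negotiate down from the client's default API version to the daemon's.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	vol, err := cli.VolumeCreate(context.Background(), volume.CreateOptions{
		Name:   "tardis", // illustrative, matching the schema examples above
		Driver: "local",
		Labels: map[string]string{"com.example.some-label": "some-value"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created volume %q at %s", vol.Name, vol.Mountpoint)
}
```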
@@ -4052,73 +4433,71 @@ definitions: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" ContainerSummary: - type: "array" - items: - type: "object" - properties: - Id: - description: "The ID of this container" + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: type: "string" - x-go-name: "ID" - Names: - description: "The names that this container has been given" - type: "array" - items: - type: "string" - Image: - description: "The name of the image used when creating this container" - type: "string" - ImageID: - description: "The ID of the image that this container was created from" - type: "string" - Command: - description: "Command to run when starting the container" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: type: "string" - Created: - description: "When the container was created" - type: "integer" - format: "int64" - Ports: - description: "The ports exposed by this container" - type: "array" - items: - $ref: "#/definitions/Port" - SizeRw: - description: "The size of files that have been created or changed by this container" - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container" - type: "integer" - format: "int64" - Labels: - description: "User-defined key/value metadata." - type: "object" - additionalProperties: + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: type: "string" - State: - description: "The state of this container (e.g. `Exited`)" - type: "string" - Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" - type: "string" - HostConfig: - type: "object" - properties: - NetworkMode: - type: "string" - NetworkSettings: - description: "A summary of the container's network settings" - type: "object" - properties: - Networks: - type: "object" - additionalProperties: - $ref: "#/definitions/EndpointSettings" - Mounts: - type: "array" - items: - $ref: "#/definitions/Mount" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" Driver: description: "Driver represents a driver (network, logging, secrets)." 
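The ContainerSummary change above — from an inline array schema to a named object, with Mounts now referencing MountPoint rather than Mount — is the shape the Go client decodes for container listing. A sketch against that client, under the same vendoring assumption as the previous example:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Printf("%s  %s  %s\n", c.ID[:12], c.Image, c.State)
		for _, m := range c.Mounts { // MountPoint entries, per the schema above
			fmt.Printf("  %s -> %s (%s, rw=%v)\n", m.Source, m.Destination, m.Type, m.RW)
		}
	}
}
```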
@@ -4240,6 +4619,7 @@ definitions: ContainerState stores container's running state. It's part of ContainerJSONBase and will be returned by the "inspect" command. type: "object" + x-nullable: true properties: Status: description: | @@ -4297,9 +4677,52 @@ definitions: type: "string" example: "2020-01-06T09:07:59.461876391Z" Health: - x-nullable: true $ref: "#/definitions/Health" + ContainerCreateResponse: + description: "OK response to ContainerCreate operation" + type: "object" + title: "ContainerCreateResponse" + x-go-name: "CreateResponse" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + example: [] + + ContainerWaitResponse: + description: "OK response to ContainerWait operation" + type: "object" + x-go-name: "WaitResponse" + title: "ContainerWaitResponse" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + format: "int64" + x-nullable: false + Error: + $ref: "#/definitions/ContainerWaitExitError" + + ContainerWaitExitError: + description: "container waiting error, if any" + type: "object" + x-go-name: "WaitExitError" + properties: + Message: + description: "Details of an error" + type: "string" + SystemVersion: type: "object" description: | @@ -4396,7 +4819,6 @@ definitions: type: "string" example: "2020-06-22T15:49:27.000000000+00:00" - SystemInfo: type: "object" properties: @@ -4481,14 +4903,13 @@ definitions: description: "Indicates if the host has memory swap limit support enabled." type: "boolean" example: true - KernelMemory: + KernelMemoryTCP: description: | - Indicates if the host has kernel memory limit support enabled. - -
+ Indicates if the host has kernel memory TCP limit support enabled. This + field is omitted if not supported. - > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated - > `kmem.limit_in_bytes`. + Kernel memory TCP limits are not supported when using cgroups v2, which + does not support the corresponding `memory.kmem.tcp.limit_in_bytes` cgroup. type: "boolean" example: true CpuCfsPeriod: @@ -5198,6 +5619,7 @@ definitions: PeerNode: description: "Represents a peer-node in the swarm" + type: "object" properties: NodeID: description: "Unique identifier of for this node in the swarm." @@ -5229,6 +5651,394 @@ definitions: additionalProperties: type: "string" + EventActor: + description: | + Actor describes something that generates events, like a container, network, + or a volume. + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + example: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + description: | + Various key/value attributes of the object, depending on its type. + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-label-value" + image: "alpine:latest" + name: "my-container" + + EventMessage: + description: | + EventMessage represents the information an event contains. + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + enum: ["builder", "config", "container", "daemon", "image", "network", "node", "plugin", "secret", "service", "volume"] + example: "container" + Action: + description: "The type of event" + type: "string" + example: "create" + Actor: + $ref: "#/definitions/EventActor" + scope: + description: | + Scope of the event. Engine events are `local` scope. Cluster (Swarm) + events are `swarm` scope. + type: "string" + enum: ["local", "swarm"] + time: + description: "Timestamp of event" + type: "integer" + format: "int64" + example: 1629574695 + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + example: 1629574695515050031 + + OCIDescriptor: + type: "object" + x-go-name: Descriptor + description: | + A descriptor struct containing digest, media type, and size, as defined in + the [OCI Content Descriptors Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/descriptor.md). + properties: + mediaType: + description: | + The media type of the object this schema refers to. + type: "string" + example: "application/vnd.docker.distribution.manifest.v2+json" + digest: + description: | + The digest of the targeted content. + type: "string" + example: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" + size: + description: | + The size in bytes of the blob. + type: "integer" + format: "int64" + example: 3987495 + # TODO Not yet including these fields for now, as they are nil / omitted in our response. + # urls: + # description: | + # List of URLs from which this object MAY be downloaded. + # type: "array" + # items: + # type: "string" + # format: "uri" + # annotations: + # description: | + # Arbitrary metadata relating to the targeted content. 
+ # type: "object" + # additionalProperties: + # type: "string" + # platform: + # $ref: "#/definitions/OCIPlatform" + + OCIPlatform: + type: "object" + x-go-name: Platform + description: | + Describes the platform which the image in the manifest runs on, as defined + in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). + properties: + architecture: + description: | + The CPU architecture, for example `amd64` or `ppc64`. + type: "string" + example: "arm" + os: + description: | + The operating system, for example `linux` or `windows`. + type: "string" + example: "windows" + os.version: + description: | + Optional field specifying the operating system version, for example on + Windows `10.0.19041.1165`. + type: "string" + example: "10.0.19041.1165" + os.features: + description: | + Optional field specifying an array of strings, each listing a required + OS feature (for example on Windows `win32k`). + type: "array" + items: + type: "string" + example: + - "win32k" + variant: + description: | + Optional field specifying a variant of the CPU, for example `v7` to + specify ARMv7 when architecture is `arm`. + type: "string" + example: "v7" + + DistributionInspect: + type: "object" + x-go-name: DistributionInspect + title: "DistributionInspectResponse" + required: [Descriptor, Platforms] + description: | + Describes the result obtained from contacting the registry to retrieve + image metadata. + properties: + Descriptor: + $ref: "#/definitions/OCIDescriptor" + Platforms: + type: "array" + description: | + An array containing all platforms supported by the image. + items: + $ref: "#/definitions/OCIPlatform" + + ClusterVolume: + type: "object" + description: | + Options and information specific to, and only present on, Swarm CSI + cluster volumes. + properties: + ID: + type: "string" + description: | + The Swarm ID of this volume. Because cluster volumes are Swarm + objects, they have an ID, unlike non-cluster volumes. This ID can + be used to refer to the Volume instead of the name. + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + Info: + type: "object" + description: | + Information about the global status of the volume. + properties: + CapacityBytes: + type: "integer" + format: "int64" + description: | + The capacity of the volume in bytes. A value of 0 indicates that + the capacity is unknown. + VolumeContext: + type: "object" + description: | + A map of strings to strings returned from the storage plugin when + the volume is created. + additionalProperties: + type: "string" + VolumeID: + type: "string" + description: | + The ID of the volume as returned by the CSI storage plugin. This + is distinct from the volume's ID as provided by Docker. This ID + is never used by the user when communicating with Docker to refer + to this volume. If the ID is blank, then the Volume has not been + successfully created in the plugin yet. + AccessibleTopology: + type: "array" + description: | + The topology this volume is actually accessible from. + items: + $ref: "#/definitions/Topology" + PublishStatus: + type: "array" + description: | + The status of the volume as it pertains to its publishing and use on + specific nodes + items: + type: "object" + properties: + NodeID: + type: "string" + description: | + The ID of the Swarm node the volume is published on. 
+ State: + type: "string" + description: | + The published state of the volume. + * `pending-publish` The volume should be published to this node, but the call to the controller plugin to do so has not yet been successfully completed. + * `published` The volume is published successfully to the node. + * `pending-node-unpublish` The volume should be unpublished from the node, and the manager is awaiting confirmation from the worker that it has done so. + * `pending-controller-unpublish` The volume is successfully unpublished from the node, but has not yet been successfully unpublished on the controller. + enum: + - "pending-publish" + - "published" + - "pending-node-unpublish" + - "pending-controller-unpublish" + PublishContext: + type: "object" + description: | + A map of strings to strings returned by the CSI controller + plugin when a volume is published. + additionalProperties: + type: "string" + + ClusterVolumeSpec: + type: "object" + description: | + Cluster-specific options used to create the volume. + properties: + Group: + type: "string" + description: | + Group defines the volume group of this volume. Volumes belonging to + the same group can be referred to by group name when creating + Services. Referring to a volume by group instructs Swarm to treat + volumes in that group interchangeably for the purpose of scheduling. + Volumes with an empty string for a group technically all belong to + the same, emptystring group. + AccessMode: + type: "object" + description: | + Defines how the volume is used by tasks. + properties: + Scope: + type: "string" + description: | + The set of nodes this volume can be used on at one time. + - `single` The volume may only be scheduled to one node at a time. + - `multi` the volume may be scheduled to any supported number of nodes at a time. + default: "single" + enum: ["single", "multi"] + x-nullable: false + Sharing: + type: "string" + description: | + The number and way that different tasks can use this volume + at one time. + - `none` The volume may only be used by one task at a time. + - `readonly` The volume may be used by any number of tasks, but they all must mount the volume as readonly + - `onewriter` The volume may be used by any number of tasks, but only one may mount it as read/write. + - `all` The volume may have any number of readers and writers. + default: "none" + enum: ["none", "readonly", "onewriter", "all"] + x-nullable: false + MountVolume: + type: "object" + description: | + Options for using this volume as a Mount-type volume. + + Either MountVolume or BlockVolume, but not both, must be + present. + properties: + FsType: + type: "string" + description: | + Specifies the filesystem type for the mount volume. + Optional. + MountFlags: + type: "array" + description: | + Flags to pass when mounting the volume. Optional. + items: + type: "string" + BlockVolume: + type: "object" + description: | + Options for using this volume as a Block-type volume. + Intentionally empty. + Secrets: + type: "array" + description: | + Swarm Secrets that are passed to the CSI storage plugin when + operating on this volume. + items: + type: "object" + description: | + One cluster volume secret entry. Defines a key-value pair that + is passed to the plugin. + properties: + Key: + type: "string" + description: | + Key is the name of the key of the key-value pair passed to + the plugin. + Secret: + type: "string" + description: | + Secret is the swarm Secret object from which to read data. + This can be a Secret name or ID. 
The Secret data is + retrieved by swarm and used as the value of the key-value + pair passed to the plugin. + AccessibilityRequirements: + type: "object" + description: | + Requirements for the accessible topology of the volume. These + fields are optional. For an in-depth description of what these + fields mean, see the CSI specification. + properties: + Requisite: + type: "array" + description: | + A list of required topologies, at least one of which the + volume must be accessible from. + items: + $ref: "#/definitions/Topology" + Preferred: + type: "array" + description: | + A list of topologies that the volume should attempt to be + provisioned in. + items: + $ref: "#/definitions/Topology" + CapacityRange: + type: "object" + description: | + The desired capacity that the volume should be created with. If + empty, the plugin will decide the capacity. + properties: + RequiredBytes: + type: "integer" + format: "int64" + description: | + The volume must be at least this big. The value of 0 + indicates an unspecified minimum + LimitBytes: + type: "integer" + format: "int64" + description: | + The volume must not be bigger than this. The value of 0 + indicates an unspecified maximum. + Availability: + type: "string" + description: | + The availability of the volume for use in tasks. + - `active` The volume is fully available for scheduling on the cluster + - `pause` No new workloads should use the volume, but existing workloads are not stopped. + - `drain` All workloads using this volume should be stopped and rescheduled, and no new ones should be started. + default: "active" + x-nullable: false + enum: + - "active" + - "pause" + - "drain" + + Topology: + description: | + A map of topological domains to topological segments. For in depth + details, see documentation for the Topology object in the CSI + specification. + type: "object" + additionalProperties: + type: "string" + paths: /containers/json: get: @@ -5291,7 +6101,9 @@ paths: 200: description: "no error" schema: - $ref: "#/definitions/ContainerSummary" + type: "array" + items: + $ref: "#/definitions/ContainerSummary" examples: application/json: - Id: "8dfafdbc3a40" @@ -5517,7 +6329,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 NanoCpus: 500000 CpuPercent: 80 CpuShares: 512 @@ -5611,25 +6422,7 @@ paths: 201: description: "Container created successfully" schema: - type: "object" - title: "ContainerCreateResponse" - description: "OK response to ContainerCreate operation" - required: [Id, Warnings] - properties: - Id: - description: "The ID of the created container" - type: "string" - x-nullable: false - Warnings: - description: "Warnings encountered when creating the container" - type: "array" - x-nullable: false - items: - type: "string" - examples: - application/json: - Id: "e90e34656806" - Warnings: [] + $ref: "#/definitions/ContainerCreateResponse" 400: description: "bad parameter" schema: @@ -5679,7 +6472,6 @@ paths: items: type: "string" State: - x-nullable: true $ref: "#/definitions/ContainerState" Image: description: "The container's image ID" @@ -5810,7 +6602,6 @@ paths: Memory: 0 MemorySwap: 0 MemoryReservation: 0 - KernelMemory: 0 OomKillDisable: false OomScoreAdj: 500 NetworkMode: "bridge" @@ -6008,6 +6799,9 @@ paths: Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. 
+ produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" operationId: "ContainerLogs" responses: 200: @@ -6421,6 +7215,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" @@ -6450,6 +7249,11 @@ paths: required: true description: "ID or name of the container" type: "string" + - name: "signal" + in: "query" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). + type: "string" - name: "t" in: "query" description: "Number of seconds to wait before killing the container" @@ -6491,7 +7295,8 @@ paths: type: "string" - name: "signal" in: "query" - description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + description: | + Signal to send to the container as an integer or string (e.g. `SIGINT`). type: "string" default: "SIGKILL" tags: ["Container"] @@ -6555,7 +7360,6 @@ paths: Memory: 314572800 MemorySwap: 514288000 MemoryReservation: 209715200 - KernelMemory: 52428800 RestartPolicy: MaximumRetryCount: 4 Name: "on-failure" @@ -6709,7 +7513,8 @@ paths: ### Stream format When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), - the stream over the hijacked connected is multiplexed to separate out + the HTTP Content-Type header is set to application/vnd.docker.multiplexed-stream + and the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. @@ -6753,6 +7558,7 @@ paths: operationId: "ContainerAttach" produces: - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 101: description: "no error, hints proxy about hijacking" @@ -6894,22 +7700,11 @@ paths: 200: description: "The container has exit." schema: - type: "object" - title: "ContainerWaitResponse" - description: "OK response to ContainerWait operation" - required: [StatusCode] - properties: - StatusCode: - description: "Exit code of the container" - type: "integer" - x-nullable: false - Error: - description: "container waiting error, if any" - type: "object" - properties: - Message: - description: "Details of an error" - type: "string" + $ref: "#/definitions/ContainerWaitResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "no such container" schema: @@ -6930,9 +7725,14 @@ paths: - name: "condition" in: "query" description: | - Wait until a container state reaches the given condition, either - 'not-running' (default), 'next-exit', or 'removed'. + Wait until a container state reaches the given condition. + + Defaults to `not-running` if omitted or empty. type: "string" + enum: + - "not-running" + - "next-exit" + - "removed" default: "not-running" tags: ["Container"] /containers/{id}: @@ -7004,21 +7804,11 @@ paths: type: "string" description: | A base64 - encoded JSON object with some filesystem header - information about the path - 400: - description: "Bad parameter" - schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: | - The error message. 
Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). - type: "string" - x-nullable: false + information about the path + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: @@ -7053,17 +7843,7 @@ paths: 400: description: "Bad parameter" schema: - allOf: - - $ref: "#/definitions/ErrorResponse" - - type: "object" - properties: - message: - description: | - The error message. Either "must specify path parameter" - (path cannot be empty) or "not a directory" (path was - asserted to be a directory but exists as a file). - type: "string" - x-nullable: false + $ref: "#/definitions/ErrorResponse" 404: description: "Container or path does not exist" schema: @@ -7089,7 +7869,10 @@ paths: tags: ["Container"] put: summary: "Extract an archive of files or folders to a directory in a container" - description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + description: | + Upload a tar archive to be extracted to a path in the filesystem of container id. + `path` parameter is asserted to be a directory. If it exists as a file, 400 error + will be returned with message "not a directory". operationId: "PutContainerArchive" consumes: ["application/x-tar", "application/octet-stream"] responses: @@ -7099,6 +7882,9 @@ paths: description: "Bad parameter" schema: $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "not a directory" 403: description: "Permission denied, the volume or container rootfs is marked as read-only." schema: @@ -7200,35 +7986,6 @@ paths: type: "array" items: $ref: "#/definitions/ImageSummary" - examples: - application/json: - - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" - ParentId: "" - RepoTags: - - "ubuntu:12.04" - - "ubuntu:precise" - RepoDigests: - - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" - Created: 1474925151 - Size: 103579269 - VirtualSize: 103579269 - SharedSize: 0 - Labels: {} - Containers: 2 - - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" - ParentId: "" - RepoTags: - - "ubuntu:12.10" - - "ubuntu:quantal" - RepoDigests: - - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" - - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" - Created: 1403128455 - Size: 172064416 - VirtualSize: 172064416 - SharedSize: 0 - Labels: {} - Containers: 5 500: description: "server error" schema: @@ -7253,6 +8010,11 @@ paths: - `reference`=(`[:]`) - `since`=(`[:]`, `` or ``) type: "string" + - name: "shared-size" + in: "query" + description: "Compute and show shared size as a `SharedSize` field on each image." + type: "boolean" + default: false - name: "digests" in: "query" description: "Show digest information as a `RepoDigests` field on each image." @@ -7551,9 +8313,36 @@ paths: Refer to the [authentication section](#section/Authentication) for details. type: "string" + - name: "changes" + in: "query" + description: | + Apply `Dockerfile` instructions to the image that is created, + for example: `changes=ENV DEBUG=true`. + Note that `ENV DEBUG=true` should be URI component encoded. 
+ + Supported `Dockerfile` instructions: + `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` + type: "array" + items: + type: "string" - name: "platform" in: "query" - description: "Platform in the format os[/arch[/variant]]" + description: | + Platform in the format os[/arch[/variant]]. + + When used in combination with the `fromImage` option, the daemon checks + if the given image is present in the local image cache with the given + OS and Architecture, and otherwise attempts to pull the image. If the + option is not set, the host's native OS and Architecture are used. + If the given image does not exist in the local image cache, the daemon + attempts to pull the image with the host's native OS and Architecture. + If the given image does exists in the local image cache, but its OS or + architecture does not match, a warning is produced. + + When used with the `fromSrc` option to import an image from an archive, + this option sets the platform information for the imported image. If + the option is not set, the host's native OS and Architecture are used + for the imported image. type: "string" default: "" tags: ["Image"] @@ -7568,84 +8357,7 @@ paths: 200: description: "No error" schema: - $ref: "#/definitions/Image" - examples: - application/json: - Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" - Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" - Comment: "" - Os: "linux" - Architecture: "amd64" - Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - ContainerConfig: - Tty: false - Hostname: "e611e15f9c9d" - Domainname: "" - AttachStdout: false - PublishService: "" - AttachStdin: false - OpenStdin: false - StdinOnce: false - NetworkDisabled: false - OnBuild: [] - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - User: "" - WorkingDir: "" - MacAddress: "" - AttachStderr: false - Labels: - com.example.license: "GPL" - com.example.version: "1.0" - com.example.vendor: "Acme" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Cmd: - - "/bin/sh" - - "-c" - - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" - DockerVersion: "1.9.0-dev" - VirtualSize: 188359297 - Size: 0 - Author: "" - Created: "2015-09-10T08:30:53.26995814Z" - GraphDriver: - Name: "aufs" - Data: {} - RepoDigests: - - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" - RepoTags: - - "example:1.0" - - "example:latest" - - "example:stable" - Config: - Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" - NetworkDisabled: false - OnBuild: [] - StdinOnce: false - PublishService: "" - AttachStdin: false - OpenStdin: false - Domainname: "" - AttachStdout: false - Tty: false - Hostname: "e611e15f9c9d" - Cmd: - - "/bin/bash" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Labels: - com.example.vendor: "Acme" - com.example.version: "1.0" - com.example.license: "GPL" - MacAddress: "" - AttachStderr: false - WorkingDir: "" - User: "" - RootFS: - Type: "layers" - Layers: - - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" - - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + $ref: "#/definitions/ImageInspect" 404: description: "No such image" schema: @@ -8015,6 +8727,10 @@ paths: IdentityToken: "9cbaf023786cd7..." 
204: description: "No error" + 401: + description: "Auth error" + schema: + $ref: "#/definitions/ErrorResponse" 500: description: "Server error" schema: @@ -8076,10 +8792,27 @@ paths: description: "Max API Version the server supports" Builder-Version: type: "string" - description: "Default version of docker image builder" + description: | + Default version of docker image builder + + The default on Linux is version "2" (BuildKit), but the daemon + can be configured to recommend version "1" (classic Builder). + Windows does not yet support BuildKit for native Windows images, + and uses "1" (classic builder) as a default. + + This value is a recommendation as advertised by the daemon, and + it is up to the client to choose which builder to use. + default: "2" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" @@ -8119,6 +8852,13 @@ paths: Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Swarm: + type: "string" + enum: ["inactive", "pending", "error", "locked", "active/worker", "active/manager"] + description: | + Contains information about Swarm status of the daemon, + and if the daemon is acting as a manager or worker node. + default: "inactive" Cache-Control: type: "string" default: "no-cache, no-store, must-revalidate" @@ -8225,44 +8965,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "SystemEventsResponse" - properties: - Type: - description: "The type of object emitting the event" - type: "string" - Action: - description: "The type of event" - type: "string" - Actor: - type: "object" - properties: - ID: - description: "The ID of the object emitting the event" - type: "string" - Attributes: - description: "Various key/value attributes of the object, depending on its type" - type: "object" - additionalProperties: - type: "string" - time: - description: "Timestamp of event" - type: "integer" - timeNano: - description: "Timestamp of event, with nanosecond accuracy" - type: "integer" - format: "int64" - examples: - application/json: - Type: "container" - Action: "create" - Actor: - ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" - Attributes: - com.example.some-label: "some-label-value" - image: "alpine" - name: "my-container" - time: 1461943101 + $ref: "#/definitions/EventMessage" 400: description: "bad parameter" schema: @@ -8390,10 +9093,43 @@ paths: UsageData: Size: 10920104 RefCount: 2 + BuildCache: + - + ID: "hw53o5aio51xtltp5xjp8v7fx" + Parents: [] + Type: "regular" + Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0" + InUse: false + Shared: true + Size: 0 + CreatedAt: "2021-06-28T13:31:01.474619385Z" + LastUsedAt: "2021-07-07T22:02:32.738075951Z" + UsageCount: 26 + - + ID: "ndlpt0hhvkqcdfkputsk4cq9c" + Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"] + Type: "regular" + Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache" + InUse: false + Shared: true + Size: 51 + CreatedAt: "2021-06-28T13:31:03.002625487Z" + LastUsedAt: 
"2021-07-07T22:02:32.773909517Z" + UsageCount: 26 500: description: "server error" schema: $ref: "#/definitions/ErrorResponse" + parameters: + - name: "type" + in: "query" + description: | + Object types, for which to compute and return data. + type: "array" + collectionFormat: multi + items: + type: "string" + enum: ["container", "image", "volume", "build-cache"] tags: ["System"] /images/{name}/get: get: @@ -8544,6 +9280,7 @@ paths: description: "Exec configuration" schema: type: "object" + title: "ExecConfig" properties: AttachStdin: type: "boolean" @@ -8554,6 +9291,15 @@ paths: AttachStderr: type: "boolean" description: "Attach to `stderr` of the exec command." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 DetachKeys: type: "string" description: | @@ -8618,6 +9364,7 @@ paths: - "application/json" produces: - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 200: description: "No error" @@ -8634,6 +9381,7 @@ paths: in: "body" schema: type: "object" + title: "ExecStartConfig" properties: Detach: type: "boolean" @@ -8641,9 +9389,19 @@ paths: Tty: type: "boolean" description: "Allocate a pseudo-TTY." + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array." + x-nullable: true + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 example: Detach: false - Tty: false + Tty: true + ConsoleSize: [80, 64] - name: "id" in: "path" description: "Exec instance ID" @@ -8769,41 +9527,7 @@ paths: 200: description: "Summary volume data that matches the query" schema: - type: "object" - title: "VolumeListResponse" - description: "Volume list response" - required: [Volumes, Warnings] - properties: - Volumes: - type: "array" - x-nullable: false - description: "List of volumes" - items: - $ref: "#/definitions/Volume" - Warnings: - type: "array" - x-nullable: false - description: | - Warnings that occurred when fetching the list of volumes. - items: - type: "string" - - examples: - application/json: - Volumes: - - CreatedAt: "2017-07-19T12:00:26Z" - Name: "tardis" - Driver: "local" - Mountpoint: "/var/lib/docker/volumes/tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Scope: "local" - Options: - device: "tmpfs" - o: "size=100m,uid=1000" - type: "tmpfs" - Warnings: [] + $ref: "#/definitions/VolumeListResponse" 500: description: "Server error" schema: @@ -8848,38 +9572,7 @@ paths: required: true description: "Volume configuration" schema: - type: "object" - description: "Volume configuration" - title: "VolumeConfig" - properties: - Name: - description: | - The new volume's name. If not specified, Docker generates a name. - type: "string" - x-nullable: false - Driver: - description: "Name of the volume driver to use." - type: "string" - default: "local" - x-nullable: false - DriverOpts: - description: | - A mapping of driver options and values. These options are - passed directly to the driver and are driver specific. - type: "object" - additionalProperties: - type: "string" - Labels: - description: "User-defined key/value metadata." 
- type: "object" - additionalProperties: - type: "string" - example: - Name: "tardis" - Labels: - com.example.some-label: "some-value" - com.example.some-other-label: "some-other-value" - Driver: "custom" + $ref: "#/definitions/VolumeCreateOptions" tags: ["Volume"] /volumes/{name}: @@ -8908,6 +9601,64 @@ paths: type: "string" tags: ["Volume"] + put: + summary: | + "Update a volume. Valid only for Swarm cluster volumes" + operationId: "VolumeUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name or ID of the volume" + type: "string" + required: true + - name: "body" + in: "body" + schema: + # though the schema for is an object that contains only a + # ClusterVolumeSpec, wrapping the ClusterVolumeSpec in this object + # means that if, later on, we support things like changing the + # labels, we can do so without duplicating that information to the + # ClusterVolumeSpec. + type: "object" + description: "Volume configuration" + properties: + Spec: + $ref: "#/definitions/ClusterVolumeSpec" + description: | + The spec of the volume to update. Currently, only Availability may + change. All other fields must remain unchanged. + - name: "version" + in: "query" + description: | + The version number of the volume being updated. This is required to + avoid conflicting writes. Found in the volume's `ClusterVolume` + field. + type: "integer" + format: "int64" + required: true + tags: ["Volume"] + delete: summary: "Remove a volume" description: "Instruct the driver to remove the volume." @@ -8939,6 +9690,7 @@ paths: type: "boolean" default: false tags: ["Volume"] + /volumes/prune: post: summary: "Delete unused volumes" @@ -8953,6 +9705,7 @@ paths: Available filters: - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune volumes with (or without, in case `label!=...` is used) the specified labels. + - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes. type: "string" responses: 200: @@ -9176,6 +9929,7 @@ paths: required: true schema: type: "object" + title: "NetworkCreateRequest" required: ["Name"] properties: Name: @@ -9286,6 +10040,7 @@ paths: required: true schema: type: "object" + title: "NetworkConnectRequest" properties: Container: type: "string" @@ -9332,6 +10087,7 @@ paths: required: true schema: type: "object" + title: "NetworkDisconnectRequest" properties: Container: type: "string" @@ -9416,20 +10172,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission the user has to accept upon installing - the plugin. - type: "object" - title: "PluginPrivilegeItem" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" @@ -9505,19 +10248,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. 
- type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" @@ -9689,19 +10420,7 @@ paths: schema: type: "array" items: - description: | - Describes a permission accepted by the user upon installing the - plugin. - type: "object" - properties: - Name: - type: "string" - Description: - type: "string" - Value: - type: "array" - items: - type: "string" + $ref: "#/definitions/PluginPrivilege" example: - Name: "network" Description: "" @@ -9991,6 +10710,7 @@ paths: required: true schema: type: "object" + title: "SwarmInitRequest" properties: ListenAddr: description: | @@ -10089,6 +10809,7 @@ paths: required: true schema: type: "object" + title: "SwarmJoinRequest" properties: ListenAddr: description: | @@ -10109,7 +10830,7 @@ paths: description: | Address or interface to use for data path traffic (format: ``), for example, `192.168.1.1`, or an interface, - like `eth0`. If `DataPathAddr` is unspecified, the same addres + like `eth0`. If `DataPathAddr` is unspecified, the same address as `AdvertiseAddr` is used. The `DataPathAddr` specifies the address that global scope @@ -10249,6 +10970,7 @@ paths: required: true schema: type: "object" + title: "SwarmUnlockRequest" properties: UnlockKey: description: "The swarm's unlock key." @@ -10611,6 +11333,9 @@ paths: **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" operationId: "ServiceLogs" responses: 200: @@ -10866,6 +11591,9 @@ paths: **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" + produces: + - "application/vnd.docker.raw-stream" + - "application/vnd.docker.multiplexed-stream" responses: 200: description: "logs returned as a stream in response body" @@ -11360,67 +12088,7 @@ paths: 200: description: "descriptor and platform information" schema: - type: "object" - x-go-name: DistributionInspect - title: "DistributionInspectResponse" - required: [Descriptor, Platforms] - properties: - Descriptor: - type: "object" - description: | - A descriptor struct containing digest, media type, and size. - properties: - MediaType: - type: "string" - Size: - type: "integer" - format: "int64" - Digest: - type: "string" - URLs: - type: "array" - items: - type: "string" - Platforms: - type: "array" - description: | - An array containing all platforms supported by the image. 
- items: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" - OSVersion: - type: "string" - OSFeatures: - type: "array" - items: - type: "string" - Variant: - type: "string" - Features: - type: "array" - items: - type: "string" - examples: - application/json: - Descriptor: - MediaType: "application/vnd.docker.distribution.manifest.v2+json" - Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96" - Size: 3987495 - URLs: - - "" - Platforms: - - Architecture: "amd64" - OS: "linux" - OSVersion: "" - OSFeatures: - - "" - Variant: "" - Features: - - "" + $ref: "#/definitions/DistributionInspect" 401: description: "Failed authentication or no image found" schema: diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 9c464b73e2..97aca02306 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -59,7 +59,6 @@ type ContainerExecInspect struct { // ContainerListOptions holds parameters to list containers with. type ContainerListOptions struct { - Quiet bool Size bool All bool Latest bool @@ -113,10 +112,16 @@ type NetworkListOptions struct { Filters filters.Args } +// NewHijackedResponse intializes a HijackedResponse type +func NewHijackedResponse(conn net.Conn, mediaType string) HijackedResponse { + return HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn), mediaType: mediaType} +} + // HijackedResponse holds connection information for a hijacked request. type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader + mediaType string + Conn net.Conn + Reader *bufio.Reader } // Close closes the hijacked connection and reader. @@ -124,6 +129,15 @@ func (h *HijackedResponse) Close() { h.Conn.Close() } +// MediaType let client know if HijackedResponse hold a raw or multiplexed stream. +// returns false if HTTP Content-Type is not relevant, and container must be inspected +func (h *HijackedResponse) MediaType() (string, bool) { + if h.mediaType == "" { + return "", false + } + return h.mediaType, true +} + // CloseWriter is an interface that implements structs // that close input streams to prevent from writing. type CloseWriter interface { @@ -236,10 +250,20 @@ type ImageImportOptions struct { Platform string // Platform is the target platform of the image } -// ImageListOptions holds parameters to filter the list of images with. +// ImageListOptions holds parameters to list images with. type ImageListOptions struct { - All bool + // All controls whether all images in the graph are filtered, or just + // the heads. + All bool + + // Filters is a JSON-encoded set of filter arguments. Filters filters.Args + + // SharedSize indicates whether the shared size of images should be computed. + SharedSize bool + + // ContainerCount indicates whether container count should be computed. + ContainerCount bool } // ImageLoadResponse returns information to the client about a load process. diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 3dd133a3a5..7689f38b33 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -33,6 +33,7 @@ type ExecConfig struct { User string // User that will run the command Privileged bool // Is the container in privileged mode Tty bool // Attach standard streams to a tty. 
+	ConsoleSize *[2]uint `json:",omitempty"` // Initial console size [height, width]
 	AttachStdin  bool     // Attach the standard input, makes possible user interaction
 	AttachStderr bool     // Attach the standard error
 	AttachStdout bool     // Attach the standard output
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
index f767195b94..077583e66c 100644
--- a/vendor/github.com/docker/docker/api/types/container/config.go
+++ b/vendor/github.com/docker/docker/api/types/container/config.go
@@ -1,6 +1,7 @@
 package container // import "github.com/docker/docker/api/types/container"
 
 import (
+	"io"
 	"time"
 
 	"github.com/docker/docker/api/types/strslice"
@@ -13,6 +14,24 @@ import (
 // Docker interprets it as 3 nanoseconds.
 const MinimumDuration = 1 * time.Millisecond
 
+// StopOptions holds the options to stop or restart a container.
+type StopOptions struct {
+	// Signal (optional) is the signal to send to the container to (gracefully)
+	// stop it before forcibly terminating the container with SIGKILL after the
+	// timeout expires. If no value is set, the default (SIGTERM) is used.
+	Signal string `json:",omitempty"`
+
+	// Timeout (optional) is the timeout (in seconds) to wait for the container
+	// to stop gracefully before forcibly terminating it with SIGKILL.
+	//
+	// - Use nil to use the default timeout (10 seconds).
+	// - Use '-1' to wait indefinitely.
+	// - Use '0' to not wait for the container to exit gracefully, and
+	//   immediately proceed to forcibly terminating the container.
+	// - Other positive values are used as timeout (in seconds).
+	Timeout *int `json:",omitempty"`
+}
+
 // HealthConfig holds configuration settings for the HEALTHCHECK feature.
 type HealthConfig struct {
 	// Test is the test to perform to check that the container is healthy.
@@ -34,6 +53,14 @@ type HealthConfig struct {
 	Retries int `json:",omitempty"`
 }
 
+// ExecStartOptions holds the options to start a container's exec.
+type ExecStartOptions struct {
+	Stdin       io.Reader
+	Stdout      io.Writer
+	Stderr      io.Writer
+	ConsoleSize *[2]uint `json:",omitempty"`
+}
+
 // Config contains the configuration data about a container.
 // It should hold only portable information about the container.
 // Here, "portable" means "independent from the host we are running on".
diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go
deleted file mode 100644
index d0c852f84d..0000000000
--- a/vendor/github.com/docker/docker/api/types/container/container_create.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// ----------------------------------------------------------------------------
-// Code generated by `swagger generate operation`. DO NOT EDIT.
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 49e05ae669..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/create_response.go b/vendor/github.com/docker/docker/api/types/container/create_response.go new file mode 100644 index 0000000000..aa0e7f7d07 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/create_response.go @@ -0,0 +1,19 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// CreateResponse ContainerCreateResponse +// +// OK response to ContainerCreate operation +// swagger:model CreateResponse +type CreateResponse struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/deprecated.go b/vendor/github.com/docker/docker/api/types/container/deprecated.go new file mode 100644 index 0000000000..0cb70e3638 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/deprecated.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// +// Deprecated: use CreateResponse +type ContainerCreateCreatedBody = CreateResponse + +// ContainerWaitOKBody OK response to ContainerWait operation +// +// Deprecated: use WaitResponse +type ContainerWaitOKBody = WaitResponse + +// ContainerWaitOKBodyError container waiting error, if any +// +// Deprecated: use WaitExitError +type ContainerWaitOKBodyError = WaitExitError diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 2d1cbaa9ab..100f434ce7 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -13,19 +13,26 @@ import ( // CgroupnsMode represents the cgroup namespace mode of the container type CgroupnsMode string +// cgroup namespace modes for containers +const ( + CgroupnsModeEmpty CgroupnsMode = "" + CgroupnsModePrivate CgroupnsMode = "private" + CgroupnsModeHost CgroupnsMode = "host" +) + // IsPrivate indicates whether the container uses its own private cgroup namespace func (c CgroupnsMode) IsPrivate() bool { - return c == "private" + return c == CgroupnsModePrivate } // IsHost indicates whether the container shares the host's cgroup namespace func (c CgroupnsMode) IsHost() bool { - return c == "host" + return c == CgroupnsModeHost } // IsEmpty indicates whether the container cgroup namespace mode is unset func (c CgroupnsMode) IsEmpty() bool { - return c == "" + return c == CgroupnsModeEmpty } // Valid indicates whether the cgroup namespace mode is valid @@ -37,60 +44,69 @@ func (c CgroupnsMode) Valid() bool { // values are platform specific type Isolation string +// Isolation modes for containers +const ( + IsolationEmpty Isolation = "" // IsolationEmpty is unspecified (same behavior as default) + IsolationDefault Isolation = "default" // IsolationDefault is the default isolation mode on current daemon + IsolationProcess Isolation = "process" // IsolationProcess is process isolation mode + IsolationHyperV Isolation = "hyperv" // IsolationHyperV is HyperV isolation mode +) + // IsDefault indicates the default isolation technology of a container. On Linux this // is the native driver. On Windows, this is a Windows Server Container. 
func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" + // TODO consider making isolation-mode strict (case-sensitive) + v := Isolation(strings.ToLower(string(i))) + return v == IsolationDefault || v == IsolationEmpty } // IsHyperV indicates the use of a Hyper-V partition for isolation func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationHyperV } // IsProcess indicates the use of process isolation func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" + // TODO consider making isolation-mode strict (case-sensitive) + return Isolation(strings.ToLower(string(i))) == IsolationProcess } -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - // IpcMode represents the container ipc stack. type IpcMode string +// IpcMode constants +const ( + IPCModeNone IpcMode = "none" + IPCModeHost IpcMode = "host" + IPCModeContainer IpcMode = "container" + IPCModePrivate IpcMode = "private" + IPCModeShareable IpcMode = "shareable" +) + // IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { - return n == "private" + return n == IPCModePrivate } // IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { - return n == "host" + return n == IPCModeHost } // IsShareable indicates whether the container's ipc namespace can be shared with another container. func (n IpcMode) IsShareable() bool { - return n == "shareable" + return n == IPCModeShareable } // IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" + return strings.HasPrefix(string(n), string(IPCModeContainer)+":") } // IsNone indicates whether container IpcMode is set to "none". func (n IpcMode) IsNone() bool { - return n == "none" + return n == IPCModeNone } // IsEmpty indicates whether container IpcMode is empty @@ -105,9 +121,8 @@ func (n IpcMode) Valid() bool { // Container returns the name of the container ipc stack is going to be used. 
func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] + if n.IsContainer() { + return strings.TrimPrefix(string(n), string(IPCModeContainer)+":") } return "" } @@ -326,7 +341,7 @@ type LogMode string // Available logging modes const ( - LogModeUnset = "" + LogModeUnset LogMode = "" LogModeBlocking LogMode = "blocking" LogModeNonBlock LogMode = "non-blocking" ) @@ -361,14 +376,17 @@ type Resources struct { Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup DeviceRequests []DeviceRequest // List of device requests for device drivers - KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // KernelMemory specifies the kernel memory limit (in bytes) for the container. + // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes. + KernelMemory int64 `json:",omitempty"` + KernelMemoryTCP int64 `json:",omitempty"` // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows CPUCount int64 `json:"CpuCount"` // CPU count @@ -399,6 +417,7 @@ type HostConfig struct { AutoRemove bool // Automatically remove container when it exits VolumeDriver string // Name of the volume driver used to mount volumes VolumesFrom []string // List of volumes to take from other container + ConsoleSize [2]uint // Initial console size (height,width) // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container @@ -427,8 +446,7 @@ type HostConfig struct { Runtime string `json:",omitempty"` // Runtime to use with this container // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) // Contains container's resources (cgroups, ulimits) Resources diff --git a/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go new file mode 100644 index 0000000000..ab56d4eed8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_exit_error.go @@ -0,0 +1,12 @@ +package container + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// WaitExitError container waiting error, if any +// swagger:model WaitExitError +type WaitExitError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/wait_response.go b/vendor/github.com/docker/docker/api/types/container/wait_response.go new file mode 100644 index 0000000000..84fc6afddc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/wait_response.go @@ -0,0 +1,18 @@ +package container + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// WaitResponse ContainerWaitResponse +// +// OK response to ContainerWait operation +// swagger:model WaitResponse +type WaitResponse struct { + + // error + Error *WaitExitError `json:"Error,omitempty"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/deprecated.go b/vendor/github.com/docker/docker/api/types/deprecated.go new file mode 100644 index 0000000000..216d1df0ff --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/deprecated.go @@ -0,0 +1,14 @@ +package types // import "github.com/docker/docker/api/types" + +import "github.com/docker/docker/api/types/volume" + +// Volume volume +// +// Deprecated: use github.com/docker/docker/api/types/volume.Volume +type Volume = volume.Volume + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// Deprecated: use github.com/docker/docker/api/types/volume.UsageData +type VolumeUsageData = volume.UsageData diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index aa8fba8154..9fe07e26fd 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,33 +1,26 @@ package events // import "github.com/docker/docker/api/types/events" +// Type is used for event-types. +type Type = string + +// List of known event types. const ( - // BuilderEventType is the event type that the builder generates - BuilderEventType = "builder" - // ContainerEventType is the event type that containers generate - ContainerEventType = "container" - // DaemonEventType is the event type that daemon generate - DaemonEventType = "daemon" - // ImageEventType is the event type that images generate - ImageEventType = "image" - // NetworkEventType is the event type that networks generate - NetworkEventType = "network" - // PluginEventType is the event type that plugins generate - PluginEventType = "plugin" - // VolumeEventType is the event type that volumes generate - VolumeEventType = "volume" - // ServiceEventType is the event type that services generate - ServiceEventType = "service" - // NodeEventType is the event type that nodes generate - NodeEventType = "node" - // SecretEventType is the event type that secrets generate - SecretEventType = "secret" - // ConfigEventType is the event type that configs generate - ConfigEventType = "config" + BuilderEventType Type = "builder" // BuilderEventType is the event type that the builder generates. + ConfigEventType Type = "config" // ConfigEventType is the event type that configs generate. 
+ ContainerEventType Type = "container" // ContainerEventType is the event type that containers generate. + DaemonEventType Type = "daemon" // DaemonEventType is the event type that the daemon generates. + ImageEventType Type = "image" // ImageEventType is the event type that images generate. + NetworkEventType Type = "network" // NetworkEventType is the event type that networks generate. + NodeEventType Type = "node" // NodeEventType is the event type that nodes generate. + PluginEventType Type = "plugin" // PluginEventType is the event type that plugins generate. + SecretEventType Type = "secret" // SecretEventType is the event type that secrets generate. + ServiceEventType Type = "service" // ServiceEventType is the event type that services generate. + VolumeEventType Type = "volume" // VolumeEventType is the event type that volumes generate. ) // Actor describes something that generates events, // like a container, or a network, or a volume. -// It has a defined name and a set or attributes. +// It has a defined name and a set of attributes. // The container attributes are its labels, other actors // can generate these attributes from other properties. type Actor struct { @@ -39,11 +32,11 @@ type Actor struct { type Message struct { // Deprecated information from JSONMessage. // With data only in container events. - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` + Status string `json:"status,omitempty"` // Deprecated: use Action instead. + ID string `json:"id,omitempty"` // Deprecated: use Actor.ID instead. + From string `json:"from,omitempty"` // Deprecated: use Actor.Attributes["image"] instead. - Type string + Type Type Action string Actor Actor // Engine events are local scope. Cluster events are swarm scope. diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 4bc91cffd6..f8fe794074 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -1,4 +1,5 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of +/* +Package filters provides tools for encoding a mapping of keys to a set of multiple values. */ package filters // import "github.com/docker/docker/api/types/filters" @@ -9,6 +10,7 @@ import ( "strings" "github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" ) // Args stores a mapping of keys to a set of multiple values.
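The events hunk above collapses the per-type constants into a typed list; because Type is declared as a string alias (not a distinct type), existing code that compares Message.Type against plain strings keeps compiling. A minimal sketch of switching on the typed constants (the handleEvent helper is hypothetical):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/events"
)

// handleEvent is a hypothetical consumer of the typed event constants.
func handleEvent(msg events.Message) {
	switch msg.Type {
	case events.ContainerEventType:
		fmt.Printf("container %s: %s\n", msg.Actor.ID, msg.Action)
	case events.VolumeEventType:
		fmt.Printf("volume %s: %s\n", msg.Actor.ID, msg.Action)
	default:
		fmt.Printf("%s event: %s\n", msg.Type, msg.Action)
	}
}

func main() {
	handleEvent(events.Message{Type: events.ContainerEventType, Action: "start"})
}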
@@ -48,7 +50,7 @@ func (args Args) Keys() []string { // MarshalJSON returns a JSON byte representation of the Args func (args Args) MarshalJSON() ([]byte, error) { if len(args.fields) == 0 { - return []byte{}, nil + return []byte("{}"), nil } return json.Marshal(args.fields) } @@ -97,7 +99,7 @@ func FromJSON(p string) (Args, error) { // Fallback to parsing arguments in the legacy slice format deprecated := map[string][]string{} if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err + return args, invalidFilter{errors.Wrap(err, "invalid filter")} } args.fields = deprecatedArgs(deprecated) @@ -106,9 +108,6 @@ // UnmarshalJSON populates the Args from JSON-encoded bytes func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil - } return json.Unmarshal(raw, &args.fields) } @@ -247,10 +246,10 @@ func (args Args) Contains(field string) bool { return ok } -type invalidFilter string +type invalidFilter struct{ error } func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" + return e.error.Error() } func (invalidFilter) InvalidParameter() {} @@ -260,7 +259,7 @@ func (invalidFilter) InvalidParameter() {} func (args Args) Validate(accepted map[string]bool) error { for name := range args.fields { if !accepted[name] { - return invalidFilter(name) + return invalidFilter{errors.New("invalid filter '" + name + "'")} } } return nil diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go index 4d9bf1c62c..ce3deb331c 100644 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -3,15 +3,21 @@ package types // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command -// GraphDriverData Information about a container's graph driver. +// GraphDriverData Information about the storage driver used to store the container's and +// image's filesystem. +// // swagger:model GraphDriverData type GraphDriverData struct { - // data + // Low-level storage metadata, provided as key/value pairs. + // + // This information is driver-specific, and depends on the storage-driver + // in use, and should be used for informational purposes only. + // // Required: true Data map[string]string `json:"Data"` - // name + // Name of the storage driver. // Required: true Name string `json:"Name"` }
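The filters hunks above change two observable behaviours: an empty Args now marshals to "{}" instead of an empty byte slice, and validation/parse failures now carry a wrapped error value instead of a bare string. A small sketch, assuming only the exported filters API:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("dangling", "true"))

	// Validate rejects keys that are not in the accepted set; the error now
	// wraps a real error value via the reworked invalidFilter type.
	if err := args.Validate(map[string]bool{"label": true}); err != nil {
		fmt.Println("rejected:", err)
	}

	// ToJSON goes through MarshalJSON; after this change an empty Args
	// encodes as "{}" rather than a zero-length body.
	s, _ := filters.ToJSON(filters.NewArgs())
	fmt.Println(s) // {}
}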
diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go index e145b3dcfc..90b983a25c 100644 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -7,43 +7,91 @@ package types // swagger:model ImageSummary type ImageSummary struct { - // containers + // Number of containers using this image. Includes both stopped and running + // containers. + // + // This size is not calculated by default, and depends on which API endpoint + // is used. `-1` indicates that the value has not been set / calculated. + // // Required: true Containers int64 `json:"Containers"` - // created + // Date and time at which the image was created as a Unix timestamp + // (number of seconds since EPOCH). + // // Required: true Created int64 `json:"Created"` - // Id + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + // // Required: true ID string `json:"Id"` - // labels + // User-defined key/value metadata. // Required: true Labels map[string]string `json:"Labels"` - // parent Id + // ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + // // Required: true ParentID string `json:"ParentId"` - // repo digests + // List of content-addressable digests of locally available image manifests + // that the image is referenced from. Multiple manifests can refer to the + // same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + // // Required: true RepoDigests []string `json:"RepoDigests"` - // repo tags + // List of image names/tags in the local image cache that reference this + // image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged"; an untagged image can still be referenced by its ID. + // // Required: true RepoTags []string `json:"RepoTags"` - // shared size + // Total size of image layers that are shared between this image and other + // images. + // + // This size is not calculated by default. `-1` indicates that the value + // has not been set / calculated. + // // Required: true SharedSize int64 `json:"SharedSize"` - // size + // Total size of the image including all layers it is composed of. + // // Required: true Size int64 `json:"Size"` - // virtual size + // Total size of the image including all layers it is composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + // // Required: true VirtualSize int64 `json:"VirtualSize"` } diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 443b8d07a9..ac4ce62231 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -17,6 +17,8 @@ const ( TypeTmpfs Type = "tmpfs" // TypeNamedPipe is the type for mounting Windows named pipes TypeNamedPipe Type = "npipe" + // TypeCluster is the type for Swarm Cluster Volumes. + TypeCluster Type = "cluster" ) // Mount represents a mount (volume).
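TypeCluster above, together with the ClusterOptions field added to Mount in the next hunk, is how a caller requests a Swarm CSI cluster-volume mount. A hedged sketch (the volume name and target path are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// ClusterOptions is currently an empty marker struct; setting it (plus
	// TypeCluster) signals that Source names a cluster volume or group.
	m := mount.Mount{
		Type:           mount.TypeCluster,
		Source:         "my-csi-volume", // hypothetical cluster volume name
		Target:         "/srv/data",     // hypothetical container path
		ClusterOptions: &mount.ClusterOptions{},
	}
	fmt.Printf("%+v\n", m)
}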
@@ -30,9 +32,10 @@ type Mount struct { ReadOnly bool `json:",omitempty"` Consistency Consistency `json:",omitempty"` - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` + ClusterOptions *ClusterOptions `json:",omitempty"` } // Propagation represents the propagation of a mount. @@ -79,8 +82,9 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` + CreateMountpoint bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. @@ -129,3 +133,8 @@ type TmpfsOptions struct { // Some of these may be straightforward to add, but others, such as // uid/gid have implications in a clustered system. } + +// ClusterOptions specifies options for a Cluster volume. +type ClusterOptions struct { + // intentionally empty +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 53e47084c8..62a88f5be8 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -45,31 +45,32 @@ func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { // IndexInfo contains information about a registry // // RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } // -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } type IndexInfo struct { // Name is the name of the registry, such as "docker.io" Name string diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index ef020f458b..5ded7dba8a 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,12 +1,20 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "strconv" + "time" +) // Version represents the internal object version. 
type Version struct { Index uint64 `json:",omitempty"` } +// String implements the fmt.Stringer interface. +func (v Version) String() string { + return strconv.FormatUint(v.Index, 10) +} + // Meta is a base object inherited by most of the other ones. type Meta struct { Version Version `json:",omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 1e30f5fa10..bb98d5eedc 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -53,6 +53,7 @@ type NodeDescription struct { Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` TLSInfo TLSInfo `json:",omitempty"` + CSIInfo []NodeCSIInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). @@ -68,6 +69,21 @@ type EngineDescription struct { Plugins []PluginDescription `json:",omitempty"` } +// NodeCSIInfo represents information about a CSI plugin available on the node +type NodeCSIInfo struct { + // PluginName is the name of the CSI plugin. + PluginName string `json:",omitempty"` + // NodeID is the ID of the node as reported by the CSI plugin. This is + // different from the swarm node ID. + NodeID string `json:",omitempty"` + // MaxVolumesPerNode is the maximum number of volumes that may be published + // to this node + MaxVolumesPerNode int64 `json:",omitempty"` + // AccessibleTopology indicates the location of this node in the CSI + // plugin's topology + AccessibleTopology *Topology `json:",omitempty"` +} + // PluginDescription represents the description of an engine plugin. type PluginDescription struct { Type string `json:",omitempty"` @@ -113,3 +129,11 @@ const ( // NodeStateDisconnected DISCONNECTED NodeStateDisconnected NodeState = "disconnected" ) + +// Topology defines the CSI topology of this node. This type is a duplicate of +// github.com/docker/docker/api/types.Topology. Because the type definition +// is so simple and to avoid complicated structure or circular imports, we just +// duplicate it here. See that type for full documentation +type Topology struct { + Segments map[string]string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index b25f999646..3eae4b9b29 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -213,6 +213,16 @@ type Info struct { Warnings []string `json:",omitempty"` } +// Status provides information about the current swarm status and role, +// obtained from the "Swarm" header in the API response. +type Status struct { + // NodeState represents the state of the node. + NodeState LocalNodeState + + // ControlAvailable indicates if the node is a swarm manager. + ControlAvailable bool +} + // Peer represents a peer. type Peer struct { NodeID string
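swarm.Version now satisfies fmt.Stringer, and the new swarm.Status mirrors the "Swarm" ping header that the types.Ping changes later in this diff surface. A short sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	v := swarm.Version{Index: 42}
	fmt.Println(v.String()) // "42"

	s := swarm.Status{
		NodeState:        swarm.LocalNodeStateActive,
		ControlAvailable: true, // i.e. this node is a manager
	}
	fmt.Printf("state=%s manager=%v\n", s.NodeState, s.ControlAvailable)
}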
It specifies + // which particular volumes are to be used by this particular task, and + // fulfilling what mounts in the spec. + Volumes []VolumeAttachment } // TaskSpec represents the spec of a task. @@ -204,3 +209,17 @@ type ContainerStatus struct { type PortStatus struct { Ports []PortConfig `json:",omitempty"` } + +// VolumeAttachment contains the associating a Volume to a Task. +type VolumeAttachment struct { + // ID is the Swarmkit ID of the Volume. This is not the CSI VolumeId. + ID string `json:",omitempty"` + + // Source, together with Target, indicates the Mount, as specified in the + // ContainerSpec, that this volume fulfills. + Source string `json:",omitempty"` + + // Target, together with Source, indicates the Mount, as specified + // in the ContainerSpec, that this volume fulfills. + Target string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go deleted file mode 100644 index 84b6f07322..0000000000 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number -// seconds it represents, formatted as a string. -func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index ea3495efeb..2a74b7a597 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -100,8 +100,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // if the incoming nanosecond portion is longer or shorter than 9 digits it is // converted to nanoseconds. The expectation is that the seconds and // seconds will be used to create a time variable. For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) +// +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// // returns seconds as def(aultSeconds) if value == "" func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index e3a159912e..036405299e 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -14,43 +14,136 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" ) +const ( + // MediaTypeRawStream is vendor specific MIME-Type set for raw TTY streams + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is vendor specific MIME-Type set for stdin/stdout/stderr multiplexed streams + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" +) + // RootFS returns Image's RootFS description including the layer IDs. 
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index e3a159912e..036405299e 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -14,43 +14,136 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" "github.com/docker/go-connections/nat" ) +const ( + // MediaTypeRawStream is the vendor-specific MIME-Type set for raw TTY streams + MediaTypeRawStream = "application/vnd.docker.raw-stream" + + // MediaTypeMultiplexedStream is the vendor-specific MIME-Type set for stdin/stdout/stderr multiplexed streams + MediaTypeMultiplexedStream = "application/vnd.docker.multiplexed-stream" +) + // RootFS returns Image's RootFS description including the layer IDs. type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` + Type string `json:",omitempty"` + Layers []string `json:",omitempty"` } // ImageInspect contains response of Engine API: // GET "/images/{name:.*}/json" type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string + // ID is the content-addressable ID of an image. + // + // This identifier is a content-addressable digest calculated from the + // image's configuration (which includes the digests of layers used by + // the image). + // + // Note that this digest differs from the `RepoDigests` below, which + // holds digests of image manifests that reference the image. + ID string `json:"Id"` + + // RepoTags is a list of image names/tags in the local image cache that + // reference this image. + // + // Multiple image tags can refer to the same image, and this list may be + // empty if no tags reference the image, in which case the image is + // "untagged"; an untagged image can still be referenced by its ID. + RepoTags []string + + // RepoDigests is a list of content-addressable digests of locally available + // image manifests that the image is referenced from. Multiple manifests can + // refer to the same image. + // + // These digests are usually only available if the image was either pulled + // from a registry, or if the image was pushed to a registry, which is when + // the manifest is generated and its digest calculated. + RepoDigests []string + + // Parent is the ID of the parent image. + // + // Depending on how the image was created, this field may be empty and + // is only set for images that were built/created locally. This field + // is empty if the image was pulled from an image registry. + Parent string + + // Comment is an optional message that can be set when committing or + // importing the image. + Comment string + + // Created is the date and time at which the image was created, formatted in + // RFC 3339 nano-seconds (time.RFC3339Nano). + Created string + + // Container is the ID of the container that was used to create the image. + // + // Depending on how the image was created, this field may be empty. + Container string + + // ContainerConfig is an optional field containing the configuration of the + // container that was last committed when creating the image. + // + // Previous versions of Docker builder used this field to store build cache, + // and it is not in active use anymore. ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Variant string `json:",omitempty"` - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS - Metadata ImageMetadata + + // DockerVersion is the version of Docker that was used to build the image. + // + // Depending on how the image was created, this field may be empty. + DockerVersion string + + // Author is the name of the author that was specified when committing the + // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. + Author string + Config *container.Config + + // Architecture is the hardware CPU architecture that the image runs on. + Architecture string + + // Variant is the CPU architecture variant (presently ARM-only). + Variant string `json:",omitempty"` + + // OS is the Operating System the image is built to run on.
+ Os string + + // OsVersion is the version of the Operating System the image is built to + // run on (especially for Windows). + OsVersion string `json:",omitempty"` + + // Size is the total size of the image including all layers it is composed of. + Size int64 + + // VirtualSize is the total size of the image including all layers it is + // composed of. + // + // In versions of Docker before v1.10, this field was calculated from + // the image itself and all of its parent images. Docker v1.10 and up + // store images self-contained, and no longer use a parent-chain, making + // this field an equivalent of the Size field. + // + // This field is kept for backward compatibility, but may be removed in + // a future version of the API. + VirtualSize int64 // TODO(thaJeztah): deprecate this field + + // GraphDriver holds information about the storage driver used to store the + // container's and image's filesystem. + GraphDriver GraphDriverData + + // RootFS contains information about the image's RootFS, including the + // layer IDs. + RootFS RootFS + + // Metadata of the image in the local cache. + // + // This information is local to the daemon, and not part of the image itself. + Metadata ImageMetadata } // ImageMetadata contains engine-local data about the image type ImageMetadata struct { + // LastTagTime is the date and time at which the image was last tagged. LastTagTime time.Time `json:",omitempty"` } @@ -107,6 +200,15 @@ type Ping struct { OSType string Experimental bool BuilderVersion BuilderVersion + + // SwarmStatus provides information about the current swarm status of the + // engine, obtained from the "Swarm" header in the API response. + // + // It can be a nil struct if the API version does not provide this header + // in the ping response, or if an error occurred, in which case the client + // should use other ways to get the current swarm status, such as the /swarm + // endpoint. + SwarmStatus *swarm.Status } // ComponentVersion describes the version information for a specific component. @@ -158,8 +260,8 @@ type Info struct { Plugins PluginsInfo MemoryLimit bool SwapLimit bool - KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool + KernelMemory bool `json:",omitempty"` // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool `json:",omitempty"` // KernelMemoryTCP is not supported on cgroups v2. CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool @@ -212,7 +314,12 @@ type Info struct { SecurityOptions []string ProductLicense string `json:",omitempty"` DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - Warnings []string + + // Warnings contains a slice of warnings that occurred while collecting + // system information. These warnings are intended to be informational + // messages for the user, and are not intended to be parsed / used for + // other purposes, as they do not have a fixed format. + Warnings []string } // KeyValue holds a key/value pair @@ -283,6 +390,8 @@ type ExecStartCheck struct { Detach bool // Check if there's a tty Tty bool + // Terminal size [height, width], unused if Tty == false + ConsoleSize *[2]uint `json:",omitempty"` } // HealthcheckResult stores information about a single run of a healthcheck probe @@ -416,13 +525,44 @@ type DefaultNetworkSettings struct { // MountPoint represents a mount point configuration inside the container. // This is used for reporting the mountpoints in use by a container. 
type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string + // Type is the type of mount, see `Type` definitions in + // github.com/docker/docker/api/types/mount.Type + Type mount.Type `json:",omitempty"` + + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name. + Name string `json:",omitempty"` + + // Source is the source location of the mount. + // + // For volumes, this contains the storage location of the volume (within + // `/var/lib/docker/volumes/`). For bind-mounts, and `npipe`, this contains + // the source (host) part of the bind-mount. For `tmpfs` mount points, this + // field is empty. + Source string + + // Destination is the path relative to the container root (`/`) where the + // Source is mounted inside the container. Destination string - Driver string `json:",omitempty"` - Mode string - RW bool + + // Driver is the volume driver used to create the volume (if it is a volume). + Driver string `json:",omitempty"` + + // Mode is a comma-separated list of options supplied by the user when + // creating the bind/volume mount. + // + // The default is platform-specific (`"z"` on Linux, empty on Windows). + Mode string + + // RW indicates whether the mount is mounted writable (read-write). + RW bool + + // Propagation describes how mounts are propagated from the host into the + // mount point, and vice-versa. Refer to the Linux kernel documentation + // for details: + // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // + // This field is not used on Windows. Propagation mount.Propagation } @@ -530,15 +670,36 @@ type ShimConfig struct { Opts interface{} } +// DiskUsageObject represents an object type used for disk usage query filtering. +type DiskUsageObject string + +const ( + // ContainerObject represents a container DiskUsageObject. + ContainerObject DiskUsageObject = "container" + // ImageObject represents an image DiskUsageObject. + ImageObject DiskUsageObject = "image" + // VolumeObject represents a volume DiskUsageObject. + VolumeObject DiskUsageObject = "volume" + // BuildCacheObject represents a build-cache DiskUsageObject. + BuildCacheObject DiskUsageObject = "build-cache" +) + +// DiskUsageOptions holds parameters for system disk usage query. +type DiskUsageOptions struct { + // Types specifies what object types to include in the response. If empty, + // all object types are returned. + Types []DiskUsageObject +} + // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { LayersSize int64 Images []*ImageSummary Containers []*Container - Volumes []*Volume + Volumes []*volume.Volume BuildCache []*BuildCache - BuilderSize int64 // deprecated + BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } // ContainersPruneReport contains the response for Engine API:
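DiskUsageObject and DiskUsageOptions above let a caller scope GET /system/df to particular object types. A hedged sketch of client-side usage, assuming the client method in this vendored version accepts the new options struct:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// An empty Types slice returns all object types.
	du, err := cli.DiskUsage(context.Background(), types.DiskUsageOptions{
		Types: []types.DiskUsageObject{types.VolumeObject},
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("volumes reported: %d\n", len(du.Volumes))
}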
@@ -613,18 +774,31 @@ type BuildResult struct { ID string } -// BuildCache contains information about a build cache record +// BuildCache contains information about a build cache record. type BuildCache struct { - ID string - Parent string - Type string + // ID is the unique ID of the build cache record. + ID string + // Parent is the ID of the parent build cache record. + // + // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. + Parent string `json:"Parent,omitempty"` + // Parents is the list of parent build cache record IDs. + Parents []string `json:"Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. Description string - InUse bool - Shared bool - Size int64 - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. + LastUsedAt *time.Time + UsageCount int } // BuildCachePruneOptions hold parameters to prune the build cache diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go index 8ccb0aa92e..489e917ee5 100644 --- a/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -8,6 +8,9 @@ import ( // compare compares two version strings // returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. func compare(v1, v2 string) int { + if v1 == v2 { + return 0 + } var ( currTab = strings.Split(v1, ".") otherTab = strings.Split(v2, ".")
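The equal-string early return above short-circuits compare before any splitting; the exported helpers built on it behave as before. A quick usage sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	// Equal strings now return immediately without allocating the split slices.
	fmt.Println(versions.Equal("1.41", "1.41"))                // true
	fmt.Println(versions.LessThan("1.24", "1.41"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.42", "1.41")) // true
}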
diff --git a/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go new file mode 100644 index 0000000000..55fc5d3899 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/cluster_volume.go @@ -0,0 +1,420 @@ +package volume + +import ( + "github.com/docker/docker/api/types/swarm" +) + +// ClusterVolume contains options and information specific to, and only present +// on, Swarm CSI cluster volumes. +type ClusterVolume struct { + // ID is the Swarm ID of the volume. Because cluster volumes are Swarm + // objects, they have an ID, unlike non-cluster volumes, which only have a + // Name. This ID can be used to refer to the cluster volume. + ID string + + // Meta is the swarm metadata about this volume. + swarm.Meta + + // Spec is the cluster-specific options from which this volume is derived. + Spec ClusterVolumeSpec + + // PublishStatus contains the status of the volume as it pertains to its + // publishing on Nodes. + PublishStatus []*PublishStatus `json:",omitempty"` + + // Info is information about the global status of the volume. + Info *Info `json:",omitempty"` +} + +// ClusterVolumeSpec contains the spec used to create this volume. +type ClusterVolumeSpec struct { + // Group defines the volume group of this volume. Volumes belonging to the + // same group can be referred to by group name when creating Services. + // Referring to a volume by group instructs swarm to treat volumes in that + // group interchangeably for the purpose of scheduling. Volumes with an + // empty string for a group technically all belong to the same, empty-string + // group. + Group string `json:",omitempty"` + + // AccessMode defines how the volume is used by tasks. + AccessMode *AccessMode `json:",omitempty"` + + // AccessibilityRequirements specifies where in the cluster a volume must + // be accessible from. + // + // This field must be empty if the plugin does not support + // VOLUME_ACCESSIBILITY_CONSTRAINTS capabilities. If it is present but the + // plugin does not support it, the volume will not be created. + // + // If AccessibilityRequirements is empty, but the plugin does support + // VOLUME_ACCESSIBILITY_CONSTRAINTS, then Swarmkit will assume the entire + // cluster is a valid target for the volume. + AccessibilityRequirements *TopologyRequirement `json:",omitempty"` + + // CapacityRange defines the desired capacity that the volume should be + // created with. If nil, the plugin will decide the capacity. + CapacityRange *CapacityRange `json:",omitempty"` + + // Secrets defines Swarm Secrets that are passed to the CSI storage plugin + // when operating on this volume. + Secrets []Secret `json:",omitempty"` + + // Availability is the Volume's desired availability. Analogous to Node + // Availability, this allows the user to take volumes offline in order to + // update or delete them. + Availability Availability `json:",omitempty"` +} + +// Availability specifies the availability of the volume. +type Availability string + +const ( + // AvailabilityActive indicates that the volume is active and fully + // schedulable on the cluster. + AvailabilityActive Availability = "active" + + // AvailabilityPause indicates that no new workloads should use the + // volume, but existing workloads can continue to use it. + AvailabilityPause Availability = "pause" + + // AvailabilityDrain indicates that all workloads using this volume + // should be rescheduled, and the volume unpublished from all nodes. + AvailabilityDrain Availability = "drain" +) + +// AccessMode defines the access mode of a volume. +type AccessMode struct { + // Scope defines the set of nodes this volume can be used on at one time. + Scope Scope `json:",omitempty"` + + // Sharing defines the number and way that different tasks can use this + // volume at one time. + Sharing SharingMode `json:",omitempty"` + + // MountVolume defines options for using this volume as a Mount-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + MountVolume *TypeMount `json:",omitempty"` + + // BlockVolume defines options for using this volume as a Block-type + // volume. + // + // Either BlockVolume or MountVolume, but not both, must be present. + BlockVolume *TypeBlock `json:",omitempty"` +} + +// Scope defines the Scope of a Cluster Volume. This is how many nodes a +// Volume can be accessed simultaneously on. +type Scope string + +const ( + // ScopeSingleNode indicates the volume can be used on one node at a + // time. + ScopeSingleNode Scope = "single" + + // ScopeMultiNode indicates the volume can be used on many nodes at + // the same time. + ScopeMultiNode Scope = "multi" +) + +// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a +// Volume at the same time can use it. +type SharingMode string + +const ( + // SharingNone indicates that only one Task may use the Volume at a + // time. + SharingNone SharingMode = "none" + + // SharingReadOnly indicates that the Volume may be shared by any + // number of Tasks, but they must be read-only. + SharingReadOnly SharingMode = "readonly" + + // SharingOneWriter indicates that the Volume may be shared by any + // number of Tasks, but all after the first must be read-only. + SharingOneWriter SharingMode = "onewriter" + + // SharingAll means that the Volume may be shared by any number of + // Tasks, as readers or writers. + SharingAll SharingMode = "all" +)
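Putting the ClusterVolumeSpec pieces together — a hedged sketch of a spec for a single-node, single-writer CSI volume (the group name and capacity are invented; TypeMount and CapacityRange are defined just below):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/volume"
)

func main() {
	spec := volume.ClusterVolumeSpec{
		Group: "example-group", // hypothetical volume group
		AccessMode: &volume.AccessMode{
			Scope:   volume.ScopeSingleNode,
			Sharing: volume.SharingNone,
			// Exactly one of MountVolume or BlockVolume must be set.
			MountVolume: &volume.TypeMount{FsType: "ext4"},
		},
		CapacityRange: &volume.CapacityRange{RequiredBytes: 1 << 30}, // >= 1 GiB
		Availability:  volume.AvailabilityActive,
	}
	fmt.Printf("%+v\n", spec)
}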
+// TypeBlock defines options for using a volume as a block-type volume. +// +// Intentionally empty. +type TypeBlock struct{} + +// TypeMount contains options for using a volume as a Mount-type +// volume. +type TypeMount struct { + // FsType specifies the filesystem type for the mount volume. Optional. + FsType string `json:",omitempty"` + + // MountFlags defines flags to pass when mounting the volume. Optional. + MountFlags []string `json:",omitempty"` +} + +// TopologyRequirement expresses the user's requirements for a volume's +// accessible topology. +type TopologyRequirement struct { + // Requisite specifies a list of Topologies, at least one of which the + // volume must be accessible from. + // + // Taken verbatim from the CSI Spec: + // + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // If requisite is specified, the provisioned volume MUST be + // accessible from at least one of the requisite topologies. + // + // Given + // x = number of topologies provisioned volume is accessible from + // n = number of requisite topologies + // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1 + // If x==n, then the SP MUST make the provisioned volume available to + // all topologies from the list of requisite topologies. If it is + // unable to do so, the SP MUST fail the CreateVolume call. + // For example, if a volume should be accessible from a single zone, + // and requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2". + // Similarly, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and both "zone" "Z2" and "zone" "Z3". + // + // If x>n, then the SP MUST make the provisioned volume available from + // all topologies from the list of requisite topologies and MAY choose + // the remaining x-n unique topologies from the list of all possible + // topologies. If it is unable to do so, the SP MUST fail the + // CreateVolume call. + // For example, if a volume should be accessible from two zones, and + // requisite = + // {"region": "R1", "zone": "Z2"} + // then the provisioned volume MUST be accessible from the "region" + // "R1" and the "zone" "Z2" and the SP may select the second zone + // independently, e.g. "R1/Z4". + Requisite []Topology `json:",omitempty"` + + // Preferred is a list of Topologies that the volume should attempt to be + // provisioned in. + // + // Taken from the CSI spec: + // + // Specifies the list of topologies the CO would prefer the volume to + // be provisioned in. + // + // This field is OPTIONAL. If TopologyRequirement is specified either + // requisite or preferred or both MUST be specified. + // + // An SP MUST attempt to make the provisioned volume available using + // the preferred topologies in order from first to last. + // + // If requisite is specified, all topologies in preferred list MUST + // also be present in the list of requisite topologies. + // + // If the SP is unable to make the provisioned volume available + // from any of the preferred topologies, the SP MAY choose a topology + // from the list of requisite topologies. + // If the list of requisite topologies is not specified, then the SP + // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is + // unable to make the provisioned volume available from any of the + // requisite topologies it MUST fail the CreateVolume call. + // + // Example 1: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // preferred = + // {"region": "R1", "zone": "Z3"} + // then the SP SHOULD first attempt to make the provisioned volume + // available from "zone" "Z3" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. + // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []Topology `json:",omitempty"` +} + +// Topology is a map of topological domains to topological segments. +// +// This description is taken verbatim from the CSI Spec: +// +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. +// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
+// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `json:",omitempty"` +} + +// CapacityRange describes the minimum and maximum capacity a volume should be +// created with +type CapacityRange struct { + // RequiredBytes specifies that a volume must be at least this big. The + // value of 0 indicates an unspecified minimum. + RequiredBytes int64 + + // LimitBytes specifies that a volume must not be bigger than this. The + // value of 0 indicates an unspecified maximum + LimitBytes int64 +} + +// Secret represents a Swarm Secret value that must be passed to the CSI +// storage plugin when operating on this Volume. It represents one key-value +// pair of possibly many. +type Secret struct { + // Key is the name of the key of the key-value pair passed to the plugin. + Key string + + // Secret is the swarm Secret object from which to read data. This can be a + // Secret name or ID. The Secret data is retrieved by Swarm and used as the + // value of the key-value pair passed to the plugin. + Secret string +} + +// PublishState represents the state of a Volume as it pertains to its +// use on a particular Node. +type PublishState string + +const ( + // StatePending indicates that the volume should be published on + // this node, but the call to ControllerPublishVolume has not been + // successfully completed yet and the result recorded by swarmkit. + StatePending PublishState = "pending-publish" + + // StatePublished means the volume is published successfully to the node. + StatePublished PublishState = "published" + + // StatePendingNodeUnpublish indicates that the Volume should be + // unpublished on the Node, and we're waiting for confirmation that it has + // done so. After the Node has confirmed that the Volume has been + // unpublished, the state will move to StatePendingUnpublish. + StatePendingNodeUnpublish PublishState = "pending-node-unpublish" + + // StatePendingUnpublish means the volume is still published to the node + // by the controller, awaiting the operation to unpublish it. + StatePendingUnpublish PublishState = "pending-controller-unpublish" +) + +// PublishStatus represents the status of the volume as published to an +// individual node +type PublishStatus struct { + // NodeID is the ID of the swarm node this Volume is published to. + NodeID string `json:",omitempty"` + + // State is the publish state of the volume. + State PublishState `json:",omitempty"` + + // PublishContext is the PublishContext returned by the CSI plugin when + // a volume is published. + PublishContext map[string]string `json:",omitempty"` +} + +// Info contains information about the Volume as a whole as provided by +// the CSI storage plugin. +type Info struct { + // CapacityBytes is the capacity of the volume in bytes. A value of 0 + // indicates that the capacity is unknown. 
+ CapacityBytes int64 `json:",omitempty"` + + // VolumeContext is the context originating from the CSI storage plugin + // when the Volume is created. + VolumeContext map[string]string `json:",omitempty"` + + // VolumeID is the ID of the Volume as seen by the CSI storage plugin. This + // is distinct from the Volume's Swarm ID, which is the ID used by + // the Docker Engine to refer to the Volume. If this field is blank, then + // the Volume has not been successfully created yet. + VolumeID string `json:",omitempty"` + + // AccessibleTopology is the topology this volume is actually accessible + // from. + AccessibleTopology []Topology `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/create_options.go b/vendor/github.com/docker/docker/api/types/volume/create_options.go new file mode 100644 index 0000000000..37c41a6096 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/create_options.go @@ -0,0 +1,29 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// CreateOptions VolumeConfig +// +// Volume configuration +// swagger:model CreateOptions +type CreateOptions struct { + + // cluster volume spec + ClusterVolumeSpec *ClusterVolumeSpec `json:"ClusterVolumeSpec,omitempty"` + + // Name of the volume driver to use. + Driver string `json:"Driver,omitempty"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + DriverOpts map[string]string `json:"DriverOpts,omitempty"` + + // User-defined key/value metadata. + Labels map[string]string `json:"Labels,omitempty"` + + // The new volume's name. If not specified, Docker generates a name. + // + Name string `json:"Name,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/deprecated.go b/vendor/github.com/docker/docker/api/types/volume/deprecated.go new file mode 100644 index 0000000000..ab622d8ccb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/deprecated.go @@ -0,0 +1,11 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// VolumeCreateBody Volume configuration +// +// Deprecated: use CreateOptions +type VolumeCreateBody = CreateOptions + +// VolumeListOKBody Volume list response +// +// Deprecated: use ListResponse +type VolumeListOKBody = ListResponse diff --git a/vendor/github.com/docker/docker/api/types/volume/list_response.go b/vendor/github.com/docker/docker/api/types/volume/list_response.go new file mode 100644 index 0000000000..ca5192a2a9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/list_response.go @@ -0,0 +1,18 @@ +package volume + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ListResponse VolumeListResponse +// +// Volume list response +// swagger:model ListResponse +type ListResponse struct { + + // List of volumes + Volumes []*Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes.
+ // + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/options.go b/vendor/github.com/docker/docker/api/types/volume/options.go new file mode 100644 index 0000000000..8b0dd13899 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/options.go @@ -0,0 +1,8 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +import "github.com/docker/docker/api/types/filters" + +// ListOptions holds parameters to list volumes. +type ListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume/volume.go similarity index 87% rename from vendor/github.com/docker/docker/api/types/volume.go rename to vendor/github.com/docker/docker/api/types/volume/volume.go index c69b08448d..ea7d555e5b 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume/volume.go @@ -1,4 +1,4 @@ -package types +package volume // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command @@ -7,6 +7,9 @@ package types // swagger:model Volume type Volume struct { + // cluster volume + ClusterVolume *ClusterVolume `json:"ClusterVolume,omitempty"` + // Date/Time the volume was created. CreatedAt string `json:"CreatedAt,omitempty"` @@ -47,14 +50,14 @@ type Volume struct { Status map[string]interface{} `json:"Status,omitempty"` // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` + UsageData *UsageData `json:"UsageData,omitempty"` } -// VolumeUsageData Usage details about the volume. This information is used by the +// UsageData Usage details about the volume. This information is used by the // `GET /system/df` endpoint, and omitted in other endpoints. // -// swagger:model VolumeUsageData -type VolumeUsageData struct { +// swagger:model UsageData +type UsageData struct { // The number of containers referencing this volume. This field // is set to `-1` if the reference-count is not available. diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go deleted file mode 100644 index 8538078dd6..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go +++ /dev/null @@ -1,31 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// VolumeCreateBody Volume configuration -// swagger:model VolumeCreateBody -type VolumeCreateBody struct { - - // Name of the volume driver to use. - // Required: true - Driver string `json:"Driver"` - - // A mapping of driver options and values. These options are - // passed directly to the driver and are driver specific. - // - // Required: true - DriverOpts map[string]string `json:"DriverOpts"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // The new volume's name. If not specified, Docker generates a name. 
- // - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go deleted file mode 100644 index be06179bf4..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go +++ /dev/null @@ -1,23 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -import "github.com/docker/docker/api/types" - -// VolumeListOKBody Volume list response -// swagger:model VolumeListOKBody -type VolumeListOKBody struct { - - // List of volumes - // Required: true - Volumes []*types.Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes. - // - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_update.go b/vendor/github.com/docker/docker/api/types/volume/volume_update.go new file mode 100644 index 0000000000..f958f80a66 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_update.go @@ -0,0 +1,7 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// UpdateOptions is configuration to update a Volume with. +type UpdateOptions struct { + // Spec is the ClusterVolumeSpec to update the volume to. + Spec *ClusterVolumeSpec `json:"Spec,omitempty"` +} diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go index 3aae43e3d1..b76bf366bb 100644 --- a/vendor/github.com/docker/docker/client/build_cancel.go +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -5,7 +5,7 @@ import ( "net/url" ) -// BuildCancel requests the daemon to cancel ongoing build request +// BuildCancel requests the daemon to cancel the ongoing build request. func (cli *Client) BuildCancel(ctx context.Context, id string) error { query := url.Values{} query.Set("id", id) diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index 66d46dd161..39cfb959ff 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -20,7 +20,7 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) defer ensureReaderClosed(resp) if err != nil { - return checkpoints, wrapResponseError(err, resp, "container", container) + return checkpoints, err } err = json.NewDecoder(resp.body).Decode(&checkpoints) diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 9b2b2eaeb8..26a0fa2756 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -4,7 +4,7 @@ Package client is a Go client for the Docker Engine API. For more information about the Engine API, see the documentation: https://docs.docker.com/engine/api/ -Usage +# Usage You use the library by creating a client object and calling methods on it. 
The client can be created either from environment variables with NewClientWithOpts(client.FromEnv), @@ -37,13 +37,11 @@ For example, to list running containers (the equivalent of "docker ps"): fmt.Printf("%s %s\n", container.ID[:10], container.Image) } } - */ package client // import "github.com/docker/docker/client" import ( "context" - "fmt" "net" "net/http" "net/url" @@ -93,15 +91,18 @@ type Client struct { } // CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// If the request is non-GET return ErrRedirect, otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) +// in the client. The Docker client (and by extension docker API client) can be +// made to send a request like POST /containers//start where what would normally +// be in the name section of the URL is empty. This triggers an HTTP 301 from +// the daemon. // -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . -// The Docker client (and by extension docker API client) can be made to send a request -// like POST /containers//start where what would normally be in the name section of the URL is empty. -// This triggers an HTTP 301 from the daemon. -// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. -// This behavior change manifests in the client in that before the 301 was not followed and -// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting +// a 404 from the daemon. This behavior change manifests in the client in that +// before, the 301 was not followed and the client did not generate an error, +// but now results in a message like Error response from daemon: page not found. func CheckRedirect(req *http.Request, via []*http.Request) error { if via[0].Method == http.MethodGet { return http.ErrUseLastResponse @@ -109,13 +110,20 @@ func CheckRedirect(req *http.Request, via []*http.Request) error { return ErrRedirect } -// NewClientWithOpts initializes a new API client with default values. It takes functors -// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` -// It also initializes the custom http headers to add to each request. +// NewClientWithOpts initializes a new API client with a default HTTPClient, and +// default API host and version. It also initializes the custom HTTP headers to +// add to each request. // -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. +// It takes an optional list of Opt functional arguments, which are applied in +// the order they're provided, which allows modifying the defaults when creating +// the client. For example, the following initializes a client that configures +// itself with values from environment variables (client.FromEnv), and has +// automatic API version negotiation enabled (client.WithAPIVersionNegotiation()). 
+// +// cli, err := client.NewClientWithOpts( +// client.FromEnv, +// client.WithAPIVersionNegotiation(), +// ) func NewClientWithOpts(ops ...Opt) (*Client, error) { client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { @@ -153,12 +161,12 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) { } func defaultHTTPClient(host string) (*http.Client, error) { - url, err := ParseHostURL(host) + hostURL, err := ParseHostURL(host) if err != nil { return nil, err } - transport := new(http.Transport) - sockets.ConfigureTransport(transport, url.Scheme, url.Host) + transport := &http.Transport{} + _ = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) return &http.Client{ Transport: transport, CheckRedirect: CheckRedirect, @@ -194,11 +202,21 @@ func (cli *Client) ClientVersion() string { return cli.version } -// NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. If a manual override is in place, -// either through the `DOCKER_API_VERSION` environment variable, or if the client -// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation -// will be performed. +// NegotiateAPIVersion queries the API and updates the version to match the API +// version. NegotiateAPIVersion downgrades the client's API version to match the +// APIVersion if the ping version is lower than the default version. If the API +// version reported by the server is higher than the maximum version supported +// by the client, it uses the client's maximum version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, or if the +// client did not get a successful ping response, it assumes it is connected with +// an old daemon that does not support API version negotiation, in which case it +// downgrades to the latest version of the API before version negotiation was +// added (1.24). func (cli *Client) NegotiateAPIVersion(ctx context.Context) { if !cli.manualOverride { ping, _ := cli.Ping(ctx) @@ -206,23 +224,31 @@ func (cli *Client) NegotiateAPIVersion(ctx context.Context) { } } -// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. If a manual override is -// in place, either through the `DOCKER_API_VERSION` environment variable, or if -// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no -// negotiation is performed. -func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { +// NegotiateAPIVersionPing downgrades the client's API version to match the +// APIVersion in the ping response. If the API version in pingResponse is higher +// than the maximum version supported by the client, it uses the client's maximum +// version. +// +// If a manual override is in place, either through the "DOCKER_API_VERSION" +// (EnvOverrideAPIVersion) environment variable, or if the client is initialized +// with a fixed version (WithVersion(xx)), no negotiation is performed. +// +// If the API server's ping response does not contain an API version, we assume +// we are connected with an old daemon without API version negotiation support, +// and downgrade to the latest version of the API before version negotiation was +// added (1.24). 
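A sketch building on the ordering guarantee documented above: because Opt arguments apply in the order given, explicit options placed after client.FromEnv override whatever the environment set. WithHost and WithVersion are existing options; the host and version values are examples only.

	package main

	import "github.com/docker/docker/client"

	func newPinnedClient() (*client.Client, error) {
		return client.NewClientWithOpts(
			client.FromEnv, // reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_TLS_VERIFY
			client.WithHost("unix:///var/run/docker.sock"), // applied after FromEnv, so it wins
			client.WithVersion("1.41"),                     // fixed version: disables negotiation
		)
	}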
+func (cli *Client) NegotiateAPIVersionPing(pingResponse types.Ping) { if !cli.manualOverride { - cli.negotiateAPIVersionPing(p) + cli.negotiateAPIVersionPing(pingResponse) } } // negotiateAPIVersionPing queries the API and updates the version to match the -// API version. Any errors are silently ignored. -func (cli *Client) negotiateAPIVersionPing(p types.Ping) { - // try the latest version before versioning headers existed - if p.APIVersion == "" { - p.APIVersion = "1.24" +// API version from the ping response. +func (cli *Client) negotiateAPIVersionPing(pingResponse types.Ping) { + // default to the latest version before versioning headers existed + if pingResponse.APIVersion == "" { + pingResponse.APIVersion = "1.24" } // if the client is not initialized with a version, start with the latest supported version @@ -231,8 +257,8 @@ func (cli *Client) negotiateAPIVersionPing(p types.Ping) { } // if server version is lower than the client version, downgrade - if versions.LessThan(p.APIVersion, cli.version) { - cli.version = p.APIVersion + if versions.LessThan(pingResponse.APIVersion, cli.version) { + cli.version = pingResponse.APIVersion } // Store the results, so that automatic API version negotiation (if enabled) @@ -258,7 +284,7 @@ func (cli *Client) HTTPClient() *http.Client { func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return nil, fmt.Errorf("unable to parse docker host `%s`", host) + return nil, errors.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -278,22 +304,9 @@ func ParseHostURL(host string) (*url.URL, error) { }, nil } -// CustomHTTPHeaders returns the custom http headers stored by the client. -func (cli *Client) CustomHTTPHeaders() map[string]string { - m := make(map[string]string) - for k, v := range cli.customHTTPHeaders { - m[k] = v - } - return m -} - -// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. -// Deprecated: use WithHTTPHeaders when creating the client. -func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { - cli.customHTTPHeaders = headers -} - -// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header, +// that can be used for proxying the daemon connection. +// // Used by `docker dial-stdio` (docker/cli#889). func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { return func(ctx context.Context) (net.Conn, error) { diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go index 5846f888fe..f0783f7085 100644 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -3,7 +3,8 @@ package client // import "github.com/docker/docker/client" -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. 
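For the negotiation methods rewritten above, a sketch of driving negotiation by hand from a Ping result instead of via WithAPIVersionNegotiation:

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/client"
	)

	func negotiate(ctx context.Context, cli *client.Client) {
		// Ping errors are tolerated: a zero types.Ping carries an empty
		// APIVersion, which NegotiateAPIVersionPing treats as a pre-1.25
		// daemon and downgrades the client to API 1.24.
		ping, _ := cli.Ping(ctx)
		cli.NegotiateAPIVersionPing(ping)
		fmt.Println("negotiated API version:", cli.ClientVersion())
	}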
const DefaultDockerHost = "unix:///var/run/docker.sock" const defaultProto = "unix" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go index c649e54412..5abe60457d 100644 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -1,6 +1,7 @@ package client // import "github.com/docker/docker/client" -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST +// (EnvOverrideHost) environment variable is unset or empty. const DefaultDockerHost = "npipe:////./pipe/docker_engine" const defaultProto = "npipe" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go index ee7d411df0..f6b1881fc3 100644 --- a/vendor/github.com/docker/docker/client/config_create.go +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// ConfigCreate creates a new Config. +// ConfigCreate creates a new config. func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { var response types.ConfigCreateResponse if err := cli.NewVersionError("1.30", "config create"); err != nil { diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go index 7d0ce3e11c..9be7882c3d 100644 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) @@ -20,10 +20,10 @@ func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.C resp, err := cli.get(ctx, "/configs/"+id, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + return swarm.Config{}, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return swarm.Config{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go index a708fcaecf..24b94e9c18 100644 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -2,12 +2,12 @@ package client // import "github.com/docker/docker/client" import "context" -// ConfigRemove removes a Config. +// ConfigRemove removes a config. 
func (cli *Client) ConfigRemove(ctx context.Context, id string) error { if err := cli.NewVersionError("1.30", "config remove"); err != nil { return err } resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "config", id) + return err } diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go index 39e59cf858..1ac2985435 100644 --- a/vendor/github.com/docker/docker/client/config_update.go +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -3,18 +3,17 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) -// ConfigUpdate attempts to update a Config +// ConfigUpdate attempts to update a config func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { if err := cli.NewVersionError("1.30", "config update"); err != nil { return err } query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 88ba1ef639..ba92117d3e 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -22,7 +22,7 @@ import ( // multiplexed. // The format of the multiplexed stream is as follows: // -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} // // STREAM_TYPE can be 1 for stdout and 2 for stderr // @@ -52,6 +52,8 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option query.Set("logs", "1") } - headers := map[string][]string{"Content-Type": {"text/plain"}} + headers := map[string][]string{ + "Content-Type": {"text/plain"}, + } return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) } diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 2966e88c8e..cd7f763464 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ContainerCommit applies changes into a container and creates a new tagged image. +// ContainerCommit applies changes to a container and creates a new tagged image. func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go index bb278bf7f3..883be7fa34 100644 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -14,7 +14,7 @@ import ( "github.com/docker/docker/api/types" ) -// ContainerStatPath returns Stat information about a path inside the container filesystem. +// ContainerStatPath returns stat information about a path inside the container filesystem. 
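The ConfigUpdate hunk above only changes how the version lands in the query string; the read-modify-write pattern stays the same. A sketch, assuming the usual swarm.Config layout (Version embedded via Meta, spec under Spec); the helper name and label are ours.

	package main

	import (
		"context"

		"github.com/docker/docker/client"
	)

	func relabelConfig(ctx context.Context, cli *client.Client, id string) error {
		cfg, _, err := cli.ConfigInspectWithRaw(ctx, id)
		if err != nil {
			return err
		}
		if cfg.Spec.Labels == nil {
			cfg.Spec.Labels = map[string]string{}
		}
		cfg.Spec.Labels["reviewed"] = "true" // example label only
		// Pass the version read above; the daemon rejects stale versions.
		return cli.ConfigUpdate(ctx, id, cfg.Version, cfg.Spec)
	}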
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. @@ -23,7 +23,7 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri response, err := cli.head(ctx, urlStr, query, nil) defer ensureReaderClosed(response) if err != nil { - return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + return types.ContainerPathStat{}, err } return getContainerPathStatFromHeader(response.header) } @@ -47,12 +47,7 @@ func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath str response, err := cli.putRaw(ctx, apiPath, query, content, nil) defer ensureReaderClosed(response) if err != nil { - return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + return err } return nil @@ -67,12 +62,7 @@ func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath s apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) - } - - // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + return nil, types.ContainerPathStat{}, err } // In order to get the copy behavior right, we need to know information diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index c5079ee539..f82420b673 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -18,24 +18,33 @@ type configWrapper struct { NetworkingConfig *network.NetworkingConfig } -// ContainerCreate creates a new container based in the given configuration. +// ContainerCreate creates a new container based on the given configuration. // It can be associated with a name, but it's not mandatory. 
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { - var response container.ContainerCreateCreatedBody +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) { + var response container.CreateResponse if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { return response, err } - - // When using API 1.24 and under, the client is responsible for removing the container - if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { - hostConfig.AutoRemove = false - } - if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { return response, err } + if hostConfig != nil { + if versions.LessThan(cli.ClientVersion(), "1.25") { + // When using API 1.24 and under, the client is responsible for removing the container + hostConfig.AutoRemove = false + } + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") { + // KernelMemory was added in API 1.40, and deprecated in API 1.42 + hostConfig.KernelMemory = 0 + } + if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") { + // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize + hostConfig.ConsoleSize = [2]uint{0, 0} + } + } + query := url.Values{} if p := formatPlatform(platform); p != "" { query.Set("platform", p) diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index e3ee755b71..6a2cb006f8 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -5,6 +5,7 @@ import ( "context" "encoding/json" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" ) // ContainerExecCreate creates a new exec configuration to run an exec process. @@ -14,6 +15,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { return response, err } + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) defer ensureReaderClosed(resp) @@ -26,6 +30,9 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co // ContainerExecStart starts an exec process already created in the docker host. func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) ensureReaderClosed(resp) return err } @@ -36,7 +43,12 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config // and a reader to get output. It's up to the caller to close
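The hunk above renames the create result to container.CreateResponse and centralizes the version gates. A migration sketch, assuming CreateResponse keeps the ID/Warnings shape of the old ContainerCreateCreatedBody; hostConfig, networkingConfig, platform, and the name may all be zero values.

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/api/types/container"
		"github.com/docker/docker/client"
	)

	func createContainer(ctx context.Context, cli *client.Client) (string, error) {
		resp, err := cli.ContainerCreate(ctx, &container.Config{
			Image: "nginx:alpine", // example image
		}, nil, nil, nil, "")
		if err != nil {
			return "", err
		}
		for _, w := range resp.Warnings {
			fmt.Println("create warning:", w)
		}
		return resp.ID, nil
	}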
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} + if versions.LessThan(cli.ClientVersion(), "1.42") { + config.ConsoleSize = nil + } + headers := map[string][]string{ + "Content-Type": {"application/json"}, + } return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) } diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go index c496bcffea..d48f0d3a68 100644 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "net/url" "github.com/docker/docker/api/types" @@ -18,7 +18,7 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) + return types.ContainerJSON{}, err } var response types.ContainerJSON @@ -38,10 +38,10 @@ func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID stri serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + return types.ContainerJSON{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return types.ContainerJSON{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go index 4d6f1d23da..7c9529f1e1 100644 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -8,7 +8,9 @@ import ( // ContainerKill terminates the container process but does not remove the container from the docker host. func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { query := url.Values{} - query.Set("signal", signal) + if signal != "" { + query.Set("signal", signal) + } resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index a973de597f..bd491b3db9 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -18,7 +18,7 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis query.Set("all", "1") } - if options.Limit != -1 { + if options.Limit > 0 { query.Set("limit", strconv.Itoa(options.Limit)) } diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go index 5b6541f035..9bdf2b0fa6 100644 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -24,7 +24,7 @@ import ( // multiplexed. 
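For the exec hunks above, where the client now strips ConsoleSize before talking to pre-1.42 daemons, a sketch of the create-attach flow; stdcopy is the usual demultiplexer for the framed stream, and the command is an example.

	package main

	import (
		"context"
		"os"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
		"github.com/docker/docker/pkg/stdcopy"
	)

	func runExec(ctx context.Context, cli *client.Client, containerID string) error {
		created, err := cli.ContainerExecCreate(ctx, containerID, types.ExecConfig{
			Cmd:          []string{"echo", "hello"},
			AttachStdout: true,
			AttachStderr: true,
		})
		if err != nil {
			return err
		}
		// ContainerExecAttach starts the exec and hijacks the connection.
		hijacked, err := cli.ContainerExecAttach(ctx, created.ID, types.ExecStartCheck{})
		if err != nil {
			return err
		}
		defer hijacked.Close()
		// Without a TTY the stream is multiplexed; demultiplex it.
		_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, hijacked.Reader)
		return err
	}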
// The format of the multiplexed stream is as follows: // -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} // // STREAM_TYPE can be 1 for stdout and 2 for stderr // @@ -74,7 +74,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) if err != nil { - return nil, wrapResponseError(err, resp, "container", container) + return nil, err } return resp.body, nil } diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go index df81461b88..c21de609b0 100644 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -23,5 +23,5 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "container", containerID) + return err } diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go index 41e421969f..1e0ad99981 100644 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -3,18 +3,22 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "time" + "strconv" - timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) // ContainerRestart stops and starts a container again. -// It makes the daemon to wait for the container to be up again for +// It makes the daemon wait for the container to be up again for // a specific amount of time, given the timeout. -func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { +func (cli *Client) ContainerRestart(ctx context.Context, containerID string, options container.StopOptions) error { query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) } resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go index 629d7ab64c..2a43ce2274 100644 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -3,9 +3,10 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "time" + "strconv" - timetypes "github.com/docker/docker/api/types/time" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) // ContainerStop stops a container. In case the container fails to stop @@ -15,10 +16,13 @@ import ( // If the timeout is nil, the container's StopTimeout value is used, if set, // otherwise the engine default. A negative timeout value can be specified, // meaning no timeout, i.e. no forceful termination is performed. 
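A companion sketch for the ContainerLogs hunk above: the [8]byte header framing only applies to containers created without a TTY, and stdcopy splits the two substreams back apart.

	package main

	import (
		"context"
		"os"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
		"github.com/docker/docker/pkg/stdcopy"
	)

	func tailLogs(ctx context.Context, cli *client.Client, containerID string) error {
		rc, err := cli.ContainerLogs(ctx, containerID, types.ContainerLogsOptions{
			ShowStdout: true,
			ShowStderr: true,
			Tail:       "100", // last 100 lines; example value
		})
		if err != nil {
			return err
		}
		defer rc.Close()
		_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, rc)
		return err
	}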
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { +func (cli *Client) ContainerStop(ctx context.Context, containerID string, options container.StopOptions) error { query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) + if options.Timeout != nil { + query.Set("t", strconv.Itoa(*options.Timeout)) + } + if options.Signal != "" && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("signal", options.Signal) } resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 6917cf9fb3..bf68a5300e 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" ) -// ContainerUpdate updates resources of a container +// ContainerUpdate updates the resources of a container. func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 6ab8c1da96..2375eb1e80 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -1,14 +1,19 @@ package client // import "github.com/docker/docker/client" import ( + "bytes" "context" "encoding/json" + "errors" + "io" "net/url" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/versions" ) +const containerWaitErrorMsgLimit = 2 * 1024 /* Max: 2KiB */ + // ContainerWait waits until the specified container is in a certain state // indicated by the given condition, either "not-running" (default), // "next-exit", or "removed". @@ -24,16 +29,18 @@ import ( // wait request or in getting the response. This allows the caller to // synchronize ContainerWait with other calls, such as specifying a // "next-exit" condition before issuing a ContainerStart request. 
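The ContainerStop and ContainerRestart hunks above replace the *time.Duration parameter with container.StopOptions, an *int timeout in seconds plus an optional signal. A migration sketch:

	package main

	import (
		"context"

		"github.com/docker/docker/api/types/container"
		"github.com/docker/docker/client"
	)

	func gracefulStop(ctx context.Context, cli *client.Client, containerID string) error {
		timeout := 10 // seconds; the old parameter was a *time.Duration
		return cli.ContainerStop(ctx, containerID, container.StopOptions{
			Signal:  "SIGTERM", // only sent when the negotiated API version is 1.42+
			Timeout: &timeout,
		})
	}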
-func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) { if versions.LessThan(cli.ClientVersion(), "1.30") { return cli.legacyContainerWait(ctx, containerID) } - resultC := make(chan container.ContainerWaitOKBody) + resultC := make(chan container.WaitResponse) errC := make(chan error, 1) query := url.Values{} - query.Set("condition", string(condition)) + if condition != "" { + query.Set("condition", string(condition)) + } resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) if err != nil { @@ -44,9 +51,23 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit go func() { defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err + + body := resp.body + responseText := bytes.NewBuffer(nil) + stream := io.TeeReader(body, responseText) + + var res container.WaitResponse + if err := json.NewDecoder(stream).Decode(&res); err != nil { + // NOTE(nicks): The /wait API does not work well with HTTP proxies. + // At any time, the proxy could cut off the response stream. + // + // But because the HTTP status has already been written, the proxy's + // only option is to write a plaintext error message. + // + // If there's a JSON parsing error, read the real error message + // off the body and send it to the client. + _, _ = io.ReadAll(io.LimitReader(stream, containerWaitErrorMsgLimit)) + errC <- errors.New(responseText.String()) return } @@ -58,8 +79,8 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit // legacyContainerWait returns immediately and doesn't have an option to wait // until the container is removed. 
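A usage sketch for the renamed ContainerWait result, subscribing before ContainerStart as the documentation above recommends so the exit cannot be missed:

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/api/types/container"
		"github.com/docker/docker/client"
	)

	func runAndWait(ctx context.Context, cli *client.Client, containerID string) error {
		waitC, errC := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
		if err := cli.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
			return err
		}
		select {
		case res := <-waitC: // container.WaitResponse, formerly ContainerWaitOKBody
			fmt.Println("exit status:", res.StatusCode)
			return nil
		case err := <-errC:
			// Per the TeeReader change above, this may now carry a proxy's
			// plaintext error body instead of a bare JSON decode error.
			return err
		}
	}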
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { - resultC := make(chan container.ContainerWaitOKBody) +func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.WaitResponse, <-chan error) { + resultC := make(chan container.WaitResponse) errC := make(chan error) go func() { @@ -70,7 +91,7 @@ func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) } defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody + var res container.WaitResponse if err := json.NewDecoder(resp.body).Decode(&res); err != nil { errC <- err return diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go index 354cd36939..ba0d92e9e6 100644 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -4,23 +4,30 @@ import ( "context" "encoding/json" "fmt" + "net/url" "github.com/docker/docker/api/types" ) // DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { - var du types.DiskUsage +func (cli *Client) DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) { + var query url.Values + if len(options.Types) > 0 { + query = url.Values{} + for _, t := range options.Types { + query.Add("type", string(t)) + } + } - serverResp, err := cli.get(ctx, "/system/df", nil, nil) + serverResp, err := cli.get(ctx, "/system/df", query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return du, err + return types.DiskUsage{}, err } + var du types.DiskUsage if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return du, fmt.Errorf("Error retrieving disk usage: %v", err) + return types.DiskUsage{}, fmt.Errorf("Error retrieving disk usage: %v", err) } - return du, nil } diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go index f4e3794cb4..7f36c99a01 100644 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -8,7 +8,7 @@ import ( registrytypes "github.com/docker/docker/api/types/registry" ) -// DistributionInspect returns the image digest with full Manifest +// DistributionInspect returns the image digest with the full manifest. func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { // Contact the registry to retrieve digest and platform information var distributionInspect registrytypes.DistributionInspect diff --git a/vendor/github.com/docker/docker/client/envvars.go b/vendor/github.com/docker/docker/client/envvars.go new file mode 100644 index 0000000000..61dd45c1d7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/envvars.go @@ -0,0 +1,90 @@ +package client // import "github.com/docker/docker/client" + +const ( + // EnvOverrideHost is the name of the environment variable that can be used + // to override the default host to connect to (DefaultDockerHost). + // + // This env-var is read by FromEnv and WithHostFromEnv and when set to a + // non-empty value, takes precedence over the default host (which is platform + // specific), or any host already set. 
+ EnvOverrideHost = "DOCKER_HOST" + + // EnvOverrideAPIVersion is the name of the environment variable that can + // be used to override the API version to use. Value should be + // formatted as MAJOR.MINOR, for example, "1.19". + // + // This env-var is read by FromEnv and WithVersionFromEnv and when set to a + // non-empty value, takes precedence over API version negotiation. + // + // This environment variable should be used for debugging purposes only, as + // it can set the client to use an incompatible (or invalid) API version. + EnvOverrideAPIVersion = "DOCKER_API_VERSION" + + // EnvOverrideCertPath is the name of the environment variable that can be + // used to specify the directory from which to load the TLS certificates + // (ca.pem, cert.pem, key.pem) from. These certificates are used to configure + // the Client for a TCP connection protected by TLS client authentication. + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection. Refer to EnvTLSVerify below to learn how to + // disable verification for testing purposes. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // For local access to the API, it is recommended to connect with the daemon + // using the default local socket connection (on Linux), or the named pipe + // (on Windows). + // + // If you need to access the API of a remote daemon, consider using an SSH + // (ssh://) connection, which is easier to set up, and requires no additional + // configuration if the host is accessible using ssh. + // + // If you cannot use the alternatives above, and you must expose the API over + // a TCP connection, refer to https://docs.docker.com/engine/security/protect-access/ + // to learn how to configure the daemon and client to use a TCP connection + // with TLS client authentication. Make sure you know the differences between + // a regular TLS connection and a TLS connection protected by TLS client + // authentication, and verify that the API cannot be accessed by other clients. + EnvOverrideCertPath = "DOCKER_CERT_PATH" + + // EnvTLSVerify is the name of the environment variable that can be used to + // enable or disable TLS certificate verification. When set to a non-empty + // value, TLS certificate verification is enabled, and the client is configured + // to use a TLS connection, using certificates from the default directories + // (within `~/.docker`); refer to EnvOverrideCertPath above for additional + // details. + // + // WARNING: Access to the remote API is equivalent to root access to the + // host where the daemon runs. Do not expose the API without protection, + // and only if needed. Make sure you are familiar with the "daemon attack + // surface" (https://docs.docker.com/go/attack-surface/). + // + // Before setting up your client and daemon to use a TCP connection with TLS + // client authentication, consider using one of the alternatives mentioned + // in EnvOverrideCertPath above. + // + // Disabling TLS certificate verification (for testing purposes) + // + // TLS certificate verification is enabled by default if the Client is configured + // to use a TLS connection, and it is highly recommended to keep verification + // enabled to prevent machine-in-the-middle attacks. 
Refer to the documentation + // at https://docs.docker.com/engine/security/protect-access/ and pages linked + // from that page to learn how to configure the daemon and client to use a + // TCP connection with TLS client authentication enabled. + // + // Set the "DOCKER_TLS_VERIFY" environment to an empty string ("") to + // disable TLS certificate verification. Disabling verification is insecure, + // so should only be done for testing purposes. From the Go documentation + // (https://pkg.go.dev/crypto/tls#Config): + // + // InsecureSkipVerify controls whether a client verifies the server's + // certificate chain and host name. If InsecureSkipVerify is true, crypto/tls + // accepts any certificate presented by the server and any host name in that + // certificate. In this mode, TLS is susceptible to machine-in-the-middle + // attacks unless custom verification is used. This should be used only for + // testing or in combination with VerifyConnection or VerifyPeerCertificate. + EnvTLSVerify = "DOCKER_TLS_VERIFY" +) diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index 041bc8d49c..e5a8a865f9 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "fmt" - "net/http" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" @@ -41,11 +40,11 @@ type notFound interface { // IsErrNotFound returns true if the error is a NotFound error, which is returned // by the API when some object is not found. func IsErrNotFound(err error) bool { - var e notFound - if errors.As(err, &e) { + if errdefs.IsNotFound(err) { return true } - return errdefs.IsNotFound(err) + var e notFound + return errors.As(err, &e) } type objectNotFoundError struct { @@ -59,35 +58,11 @@ func (e objectNotFoundError) Error() string { return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -func wrapResponseError(err error, resp serverResponse, object, id string) error { - switch { - case err == nil: - return nil - case resp.statusCode == http.StatusNotFound: - return objectNotFoundError{object: object, id: id} - case resp.statusCode == http.StatusNotImplemented: - return errdefs.NotImplemented(err) - default: - return err - } -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - // IsErrUnauthorized returns true if the error is caused // when a remote registry authentication fails +// +// Deprecated: use errdefs.IsUnauthorized func IsErrUnauthorized(err error) bool { - if _, ok := err.(unauthorizedError); ok { - return ok - } return errdefs.IsUnauthorized(err) } @@ -99,32 +74,12 @@ func (e pluginPermissionDenied) Error() string { return "Permission denied while installing plugin " + e.name } -// IsErrPluginPermissionDenied returns true if the error is caused -// when a user denies a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} - -type notImplementedError struct { - message string -} - -func (e notImplementedError) Error() string { - return e.message -} - -func (e notImplementedError) NotImplemented() bool { - return true -} - // IsErrNotImplemented returns true if the error is a NotImplemented error. 
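The new exported constants above replace scattered string literals; client.FromEnv is what actually reads them. A sketch (in real code the variables would come from the caller's environment rather than os.Setenv, and the address is an example):

	package main

	import (
		"fmt"
		"os"

		"github.com/docker/docker/client"
	)

	func main() {
		// EnvOverrideHost replaces the hard-coded "DOCKER_HOST" literal;
		// EnvOverrideAPIVersion, EnvOverrideCertPath, and EnvTLSVerify
		// name the other variables documented above.
		os.Setenv(client.EnvOverrideHost, "tcp://127.0.0.1:2375")
		cli, err := client.NewClientWithOpts(client.FromEnv)
		if err != nil {
			panic(err)
		}
		defer cli.Close()
		fmt.Println("daemon host:", cli.DaemonHost())
	}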
// This is returned by the API when a requested feature has not been // implemented. +// +// Deprecated: use errdefs.IsNotImplemented func IsErrNotImplemented(err error) bool { - if _, ok := err.(notImplementedError); ok { - return ok - } return errdefs.IsNotImplemented(err) } diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go index f0dc9d9e12..a9c48a9288 100644 --- a/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/docker/docker/client/events.go @@ -17,7 +17,6 @@ import ( // be sent over the error channel. If an error is sent all processing will be stopped. It's up // to the caller to reopen the stream in the event of an error by reinvoking this method. func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - messages := make(chan events.Message) errs := make(chan error, 1) diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index e1dc49ef0f..6bdacab10a 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -12,6 +12,7 @@ import ( "time" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" "github.com/pkg/errors" ) @@ -30,12 +31,12 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu } req = cli.addHeaders(req, headers) - conn, err := cli.setupHijackConn(ctx, req, "tcp") + conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") if err != nil { return types.HijackedResponse{}, err } - return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err + return types.NewHijackedResponse(conn, mediaType), err } // DialHijack returns a hijacked connection with negotiated protocol proto. @@ -46,7 +47,8 @@ func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[s } req = cli.addHeaders(req, meta) - return cli.setupHijackConn(ctx, req, proto) + conn, _, err := cli.setupHijackConn(ctx, req, proto) + return conn, err } // fallbackDial is used when WithDialer() was not called. @@ -61,7 +63,7 @@ func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { return net.Dial(proto, addr) } -func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -69,7 +71,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto dialer := cli.Dialer() conn, err := dialer(ctx) if err != nil { - return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. 
Is 'docker daemon' running on this host?") } // When we set up a TCP connection for hijack, there could be long periods @@ -91,18 +93,18 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons if err != httputil.ErrPersistEOF { if err != nil { - return nil, err + return nil, "", err } if resp.StatusCode != http.StatusSwitchingProtocols { resp.Body.Close() - return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } } c, br := clientconn.Hijack() if br.Buffered() > 0 { // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite iff the underlying connection + // object that implements CloseWrite if the underlying connection // implements it. if _, ok := c.(types.CloseWriter); ok { c = &hijackedConnCloseWriter{&hijackedConn{c, br}} @@ -113,7 +115,13 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto br.Reset(nil) } - return c, nil + var mediaType string + if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") { + // Prior to 1.42, Content-Type is always set to raw-stream and not relevant + mediaType = resp.Header.Get("Content-Type") + } + + return c, mediaType, nil } // hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 8fcf995036..d16e1d8ea9 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -14,8 +14,8 @@ import ( "github.com/docker/docker/api/types/container" ) -// ImageBuild sends request to the daemon to build images. -// The Body in the response implement an io.ReadCloser and it's up to the caller to +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(options) diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go index 239380474e..b1c0227775 100644 --- a/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ImageCreate creates a new image based in the parent options. +// ImageCreate creates a new image based on the parent options. // It returns the JSON content in the response body. func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { ref, err := reference.ParseNormalizedNamed(parentReference) diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go index d3336d4106..c5de42cb79 100644 --- a/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -10,7 +10,7 @@ import ( "github.com/docker/docker/api/types" ) -// ImageImport creates a new image based in the source options. +// ImageImport creates a new image based on the source options. 
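With wrapResponseError removed and the client-local helpers deprecated (errors.go above), the errdefs checks become the advertised way to classify failures. A sketch:

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/client"
		"github.com/docker/docker/errdefs"
	)

	func inspectIfPresent(ctx context.Context, cli *client.Client, containerID string) {
		ctr, err := cli.ContainerInspect(ctx, containerID)
		switch {
		case errdefs.IsNotFound(err):
			// client.IsErrNotFound(err) still works, but now defers here.
			fmt.Println("no such container:", containerID)
		case err != nil:
			fmt.Println("inspect failed:", err)
		default:
			fmt.Println("container state:", ctr.State.Status)
		}
	}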
// It returns the JSON content in the response body. func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { if ref != "" { diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go index 1eb8dce025..1de10e5a08 100644 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types" ) @@ -17,10 +17,10 @@ func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (typ serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + return types.ImageInspect{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return types.ImageInspect{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index a4d7505094..950d513334 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -34,6 +34,9 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions if options.All { query.Set("all", "1") } + if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") { + query.Set("shared-size", "1") + } serverResp, err := cli.get(ctx, "/images/json", query, nil) defer ensureReaderClosed(serverResp) diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go index 84a41af0f2..6a9fb3f41f 100644 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -23,7 +23,7 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return dels, wrapResponseError(err, resp, "image", imageID) + return dels, err } err = json.NewDecoder(resp.body).Decode(&dels) diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go index 82955a7477..e69fa37225 100644 --- a/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -3,8 +3,8 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" - "fmt" "net/url" + "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -12,13 +12,15 @@ import ( "github.com/docker/docker/errdefs" ) -// ImageSearch makes the docker host to search by a term in a remote registry. +// ImageSearch makes the docker host search by a term in a remote registry. // The list of results is not sorted in any fashion. 
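For the ImageList hunk above: SharedSize is only forwarded on API 1.42+, and the client silently drops it for older daemons. A sketch; the ImageSummary SharedSize field is assumed to report -1 when the daemon did not compute it.

	package main

	import (
		"context"
		"fmt"

		"github.com/docker/docker/api/types"
		"github.com/docker/docker/client"
	)

	func listImages(ctx context.Context, cli *client.Client) error {
		images, err := cli.ImageList(ctx, types.ImageListOptions{SharedSize: true})
		if err != nil {
			return err
		}
		for _, img := range images {
			fmt.Println(img.ID, "size:", img.Size, "shared:", img.SharedSize)
		}
		return nil
	}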
func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { var results []registry.SearchResult query := url.Values{} query.Set("term", term) - query.Set("limit", fmt.Sprintf("%d", options.Limit)) + if options.Limit > 0 { + query.Set("limit", strconv.Itoa(options.Limit)) + } if options.Filters.Len() > 0 { filterJSON, err := filters.ToJSON(options.Filters) diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index aabad4a911..e9c1ed722e 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -5,17 +5,16 @@ import ( "io" "net" "net/http" - "time" "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" - networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -48,8 +47,8 @@ type CommonAPIClient interface { type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) + ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) @@ -65,16 +64,16 @@ type ContainerAPIClient interface { ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error ContainerRename(ctx context.Context, container, newContainerName string) error ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerRestart(ctx context.Context, container string, options container.StopOptions) error ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) 
ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) + ContainerStop(ctx context.Context, container string, options container.StopOptions) error + ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) @@ -107,7 +106,7 @@ type ImageAPIClient interface { // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error + NetworkConnect(ctx context.Context, network, container string, config *network.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) NetworkDisconnect(ctx context.Context, network, container string, force bool) error NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) @@ -168,18 +167,19 @@ type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) Info(ctx context.Context) (types.Info, error) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context) (types.DiskUsage, error) + DiskUsage(ctx context.Context, options types.DiskUsageOptions) (types.DiskUsage, error) Ping(ctx context.Context) (types.Ping, error) } // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) + VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter 
filters.Args) (types.VolumesPruneReport, error) + VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error } // SecretAPIClient defines API client methods for secrets diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go index 89a05b3021..0f90e2bb90 100644 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "net/url" "github.com/docker/docker/api/types" @@ -36,10 +36,10 @@ func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) defer ensureReaderClosed(resp) if err != nil { - return networkResource, nil, wrapResponseError(err, resp, "network", networkID) + return networkResource, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return networkResource, nil, err } diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go index e71b16d869..9d6c6cef07 100644 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -6,5 +6,5 @@ import "context" func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "network", networkID) + return err } diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go index d296c9fdde..95ab9b1be0 100644 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) @@ -17,10 +17,10 @@ func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) + return swarm.Node{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return swarm.Node{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go index 03ab878097..e44436debc 100644 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -16,5 +16,5 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types. 
resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "node", nodeID) + return err } diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go index de32a617fb..0d0fc3b788 100644 --- a/vendor/github.com/docker/docker/client/node_update.go +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -3,7 +3,6 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) @@ -11,7 +10,7 @@ import ( // NodeUpdate updates a Node. func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go index 6f77f0955f..099ad41846 100644 --- a/vendor/github.com/docker/docker/client/options.go +++ b/vendor/github.com/docker/docker/client/options.go @@ -18,51 +18,32 @@ type Opt func(*Client) error // FromEnv configures the client with values from environment variables. // -// Supported environment variables: -// DOCKER_HOST to set the url to the docker server. -// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// DOCKER_CERT_PATH to load the TLS certificates from. -// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +// FromEnv uses the following environment variables: +// +// DOCKER_HOST (EnvOverrideHost) to set the URL to the docker server. +// +// DOCKER_API_VERSION (EnvOverrideAPIVersion) to set the version of the API to +// use, leave empty for latest. +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). func FromEnv(c *Client) error { - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - } - - if host := os.Getenv("DOCKER_HOST"); host != "" { - if err := WithHost(host)(c); err != nil { - return err - } + ops := []Opt{ + WithTLSClientConfigFromEnv(), + WithHostFromEnv(), + WithVersionFromEnv(), } - - if version := os.Getenv("DOCKER_API_VERSION"); version != "" { - if err := WithVersion(version)(c); err != nil { + for _, op := range ops { + if err := op(c); err != nil { return err } } return nil } -// WithDialer applies the dialer.DialContext to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. -// Deprecated: use WithDialContext -func WithDialer(dialer *net.Dialer) Opt { - return WithDialContext(dialer.DialContext) -} - // WithDialContext applies the dialer to the client transport. 
This can be // used to set the Timeout and KeepAlive settings of the client. func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { @@ -93,6 +74,18 @@ func WithHost(host string) Opt { } } +// WithHostFromEnv overrides the client host with the host specified in the +// DOCKER_HOST (EnvOverrideHost) environment variable. If DOCKER_HOST is not set, +// or set to an empty value, the host is not modified. +func WithHostFromEnv() Opt { + return func(c *Client) error { + if host := os.Getenv(EnvOverrideHost); host != "" { + return WithHost(host)(c) + } + return nil + } +} + // WithHTTPClient overrides the client http client with the specified one func WithHTTPClient(client *http.Client) Opt { return func(c *Client) error { @@ -148,6 +141,42 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { } } +// WithTLSClientConfigFromEnv configures the client's TLS settings with the +// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables. +// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified. +// +// WithTLSClientConfigFromEnv uses the following environment variables: +// +// DOCKER_CERT_PATH (EnvOverrideCertPath) to specify the directory from which to +// load the TLS certificates (ca.pem, cert.pem, key.pem). +// +// DOCKER_TLS_VERIFY (EnvTLSVerify) to enable or disable TLS verification (off by +// default). +func WithTLSClientConfigFromEnv() Opt { + return func(c *Client) error { + dockerCertPath := os.Getenv(EnvOverrideCertPath) + if dockerCertPath == "" { + return nil + } + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv(EnvTLSVerify) == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + return nil + } +} + // WithVersion overrides the client version with the specified one. If an empty // version is specified, the value will be ignored to allow version negotiation. func WithVersion(version string) Opt { @@ -160,6 +189,15 @@ func WithVersion(version string) Opt { } } +// WithVersionFromEnv overrides the client version with the version specified in +// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set, +// the version is not modified. +func WithVersionFromEnv() Opt { + return func(c *Client) error { + return WithVersion(os.Getenv(EnvOverrideAPIVersion))(c) + } +} + // WithAPIVersionNegotiation enables automatic API version negotiation for the client. // With this option enabled, the client automatically negotiates the API version // to use when making requests. 
API version negotiation is performed on the first diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go index a9af001ef4..27e8695cb5 100644 --- a/vendor/github.com/docker/docker/client/ping.go +++ b/vendor/github.com/docker/docker/client/ping.go @@ -4,8 +4,10 @@ import ( "context" "net/http" "path" + "strings" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/errdefs" ) @@ -61,6 +63,13 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { if bv := resp.header.Get("Builder-Version"); bv != "" { ping.BuilderVersion = types.BuilderVersion(bv) } + if si := resp.header.Get("Swarm"); si != "" { + parts := strings.SplitN(si, "/", 2) + ping.SwarmStatus = &swarm.Status{ + NodeState: swarm.LocalNodeState(parts[0]), + ControlAvailable: len(parts) == 2 && parts[1] == "manager", + } + } err := cli.checkResponseErr(resp) return ping, errdefs.FromStatusCode(err, resp.statusCode) } diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 81b89732b0..f09e460660 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types" ) @@ -17,10 +17,10 @@ func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*type resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) defer ensureReaderClosed(resp) if err != nil { - return nil, nil, wrapResponseError(err, resp, "plugin", name) + return nil, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index cf1935e2f5..2091a054d6 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -25,7 +25,7 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P resp, err := cli.get(ctx, "/plugins", query, nil) defer ensureReaderClosed(resp) if err != nil { - return plugins, wrapResponseError(err, resp, "plugin", "") + return plugins, err } err = json.NewDecoder(resp.body).Decode(&plugins) diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 51ca1040d6..4cd66958c3 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -16,5 +16,5 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types. 
resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "plugin", name) + return err } diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index 7f54b1dd80..c799095c12 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" @@ -50,6 +49,14 @@ func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, b return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) } +func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + // putRaw sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) @@ -110,11 +117,16 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u if err != nil { return serverResponse{}, err } + resp, err := cli.doRequest(ctx, req) - if err != nil { - return resp, errdefs.FromStatusCode(err, resp.statusCode) + switch { + case errors.Is(err, context.Canceled): + return serverResponse{}, errdefs.Cancelled(err) + case errors.Is(err, context.DeadlineExceeded): + return serverResponse{}, errdefs.Deadline(err) + case err == nil: + err = cli.checkResponseErr(resp) } - err = cli.checkResponseErr(resp) return resp, errdefs.FromStatusCode(err, resp.statusCode) } @@ -129,7 +141,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp } if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings") + return serverResp, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings") } // Don't decorate context sentinel errors; users may be comparing to @@ -141,7 +153,7 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if nErr, ok := err.(*url.Error); ok { if nErr, ok := nErr.Err.(*net.OpError); ok { if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + return serverResp, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host) } } } @@ -168,10 +180,10 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { // Checks if client is running with elevated privileges if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr == nil { - err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.") + err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect") } else { f.Close() - err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.") + err = errors.Wrap(err, "this error may indicate that the docker daemon is not running") } } @@ -199,7 +211,7 @@ func (cli *Client) checkResponseErr(serverResp serverResponse) error { R: serverResp.body, N: int64(bodyMax), } - body, err = ioutil.ReadAll(bodyR) + body, err = io.ReadAll(bodyR) if err != nil { return err } @@ -234,14 +246,14 @@ func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + if versions.LessThan(cli.version, "1.25") && http.CanonicalHeaderKey(k) == "User-Agent" { continue } req.Header.Set(k, v) } for k, v := range headers { - req.Header[k] = v + req.Header[http.CanonicalHeaderKey(k)] = v } return req } @@ -259,7 +271,7 @@ func encodeData(data interface{}) (*bytes.Buffer, error) { func ensureReaderClosed(response serverResponse) { if response.body != nil { // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, response.body, 512) + io.CopyN(io.Discard, response.body, 512) response.body.Close() } } diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go index fd5b914136..c65d38a191 100644 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -8,7 +8,7 @@ import ( "github.com/docker/docker/api/types/swarm" ) -// SecretCreate creates a new Secret. +// SecretCreate creates a new secret. 
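One behavioral note on the request.go hunk above: context cancellation and deadline expiry are now mapped to typed errdefs errors before any response handling, so callers can test for them without string matching. A hedged sketch (assumes a constructed *client.Client named cli):

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if _, err := cli.Ping(ctx); err != nil {
		switch {
		case errdefs.IsDeadline(err):
			fmt.Println("deadline elapsed before the daemon replied")
		case errdefs.IsCancelled(err):
			fmt.Println("request cancelled by the caller")
		default:
			fmt.Println("API error:", err) // already classified by checkResponseErr
		}
	}
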
func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { var response types.SecretCreateResponse if err := cli.NewVersionError("1.25", "secret create"); err != nil { diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go index d093916c9a..5906874b15 100644 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -4,7 +4,7 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) @@ -20,10 +20,10 @@ func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.S resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) + return swarm.Secret{}, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { return swarm.Secret{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go index c16f555804..f47f68b6e0 100644 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -2,12 +2,12 @@ package client // import "github.com/docker/docker/client" import "context" -// SecretRemove removes a Secret. +// SecretRemove removes a secret. func (cli *Client) SecretRemove(ctx context.Context, id string) error { if err := cli.NewVersionError("1.25", "secret remove"); err != nil { return err } resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "secret", id) + return err } diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go index 164256bbc1..2e939e8ced 100644 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -3,18 +3,17 @@ package client // import "github.com/docker/docker/client" import ( "context" "net/url" - "strconv" "github.com/docker/docker/api/types/swarm" ) -// SecretUpdate attempts to update a Secret +// SecretUpdate attempts to update a secret. func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { if err := cli.NewVersionError("1.25", "secret update"); err != nil { return err } query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go index e0428bf98b..23024d0f8f 100644 --- a/vendor/github.com/docker/docker/client/service_create.go +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -9,11 +9,11 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) -// ServiceCreate creates a new Service. +// ServiceCreate creates a new service. 
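The strconv-to-version.String() swaps in the update endpoints (secret_update.go above, node_update.go and service_update.go elsewhere in this diff) are behavior-preserving: swarm.Version renders its raft index in base 10. A tiny sketch to illustrate the equivalence:

	v := swarm.Version{Index: 42}
	fmt.Println(v.String())                      // "42"
	fmt.Println(strconv.FormatUint(v.Index, 10)) // "42", identical encoding
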
func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { var response types.ServiceCreateResponse headers := map[string][]string{ diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go index 2801483b80..cee020c98b 100644 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -5,7 +5,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/url" "github.com/docker/docker/api/types" @@ -22,10 +22,10 @@ func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) + return swarm.Service{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return swarm.Service{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go index 953a2adf5a..2c46326ebc 100644 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -6,5 +6,5 @@ import "context" func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "service", serviceID) + return err } diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go index c63895f74f..8014b86258 100644 --- a/vendor/github.com/docker/docker/client/service_update.go +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "net/url" - "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" @@ -35,7 +34,7 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version query.Set("rollback", options.Rollback) } - query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("version", version.String()) if err := validateServiceSpec(service); err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index 56a5bea761..9fde7d75ee 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -2,7 +2,6 @@ package client // import "github.com/docker/docker/client" import ( "context" - "fmt" "net/url" "strconv" @@ -12,10 +11,10 @@ import ( // SwarmUpdate updates the swarm. 
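A caller-side sketch for the rewritten boolean encoding, using the usual read-modify-write cycle (assumes a constructed client and ctx; not code from this PR):

	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		panic(err) // sketch; real code would propagate the error
	}
	// RotateWorkerToken is sent as rotateWorkerToken=true, now produced by
	// strconv.FormatBool rather than fmt.Sprintf("%v", ...).
	err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{RotateWorkerToken: true})
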
func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) - query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + query.Set("version", version.String()) + query.Set("rotateWorkerToken", strconv.FormatBool(flags.RotateWorkerToken)) + query.Set("rotateManagerToken", strconv.FormatBool(flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", strconv.FormatBool(flags.RotateManagerUnlockKey)) resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go index 44d40ba5ae..dde1f6c59d 100644 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -4,12 +4,12 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" "github.com/docker/docker/api/types/swarm" ) -// TaskInspectWithRaw returns the task information and its raw representation.. +// TaskInspectWithRaw returns the task information and its raw representation. func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { if taskID == "" { return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} @@ -17,10 +17,10 @@ func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) defer ensureReaderClosed(serverResp) if err != nil { - return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) + return swarm.Task{}, nil, err } - body, err := ioutil.ReadAll(serverResp.body) + body, err := io.ReadAll(serverResp.body) if err != nil { return swarm.Task{}, nil, err } diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index 92761b3c63..b3b182437b 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -4,18 +4,17 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" ) // VolumeCreate creates a volume in the docker host. 
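The volume API below moves from types.Volume/VolumeCreateBody to the volume package's own types. A usage sketch under the new signature (the volume name and labels are hypothetical, for illustration only):

	vol, err := cli.VolumeCreate(ctx, volume.CreateOptions{
		Name:   "agent-data", // hypothetical name
		Driver: "local",
		Labels: map[string]string{"owner": "example"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(vol.Name, vol.Mountpoint) // vol is a volume.Volume, not types.Volume
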
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { - var volume types.Volume +func (cli *Client) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) { + var vol volume.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) defer ensureReaderClosed(resp) if err != nil { - return volume, err + return vol, err } - err = json.NewDecoder(resp.body).Decode(&volume) - return volume, err + err = json.NewDecoder(resp.body).Decode(&vol) + return vol, err } diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go index e20b2c67c7..b3ba4e6046 100644 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -4,35 +4,35 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "io" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/volume" ) // VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) { + vol, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return vol, err } // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (volume.Volume, []byte, error) { if volumeID == "" { - return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + return volume.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} } - var volume types.Volume + var vol volume.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) defer ensureReaderClosed(resp) if err != nil { - return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + return vol, nil, err } - body, err := ioutil.ReadAll(resp.body) + body, err := io.ReadAll(resp.body) if err != nil { - return volume, nil, err + return vol, nil, err } rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err + err = json.NewDecoder(rdr).Decode(&vol) + return vol, body, err } diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 942498dde2..d8204f8db5 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -6,12 +6,12 @@ import ( "net/url" "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" + "github.com/docker/docker/api/types/volume" ) // VolumeList returns the volumes configured in the docker host. 
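And the matching list call, which now returns volume.ListResponse (sketch, same assumptions as above):

	resp, err := cli.VolumeList(ctx, filters.NewArgs(filters.Arg("dangling", "true")))
	if err != nil {
		panic(err)
	}
	for _, v := range resp.Volumes { // Volumes is []*volume.Volume
		fmt.Println(v.Name)
	}
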
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { - var volumes volumetypes.VolumeListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volume.ListResponse, error) { + var volumes volume.ListResponse query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 79decdafab..1f26438360 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -17,5 +17,5 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) defer ensureReaderClosed(resp) - return wrapResponseError(err, resp, "volume", volumeID) + return err } diff --git a/vendor/github.com/docker/docker/client/volume_update.go b/vendor/github.com/docker/docker/client/volume_update.go new file mode 100644 index 0000000000..33bd31e531 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_update.go @@ -0,0 +1,24 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/volume" +) + +// VolumeUpdate updates a volume. This only works for Cluster Volumes, and +// only some fields can be updated. +func (cli *Client) VolumeUpdate(ctx context.Context, volumeID string, version swarm.Version, options volume.UpdateOptions) error { + if err := cli.NewVersionError("1.42", "volume update"); err != nil { + return err + } + + query := url.Values{} + query.Set("version", version.String()) + + resp, err := cli.put(ctx, "/volumes/"+volumeID, query, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go index 5afe486779..77bda389d1 100644 --- a/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -2,14 +2,12 @@ package errdefs // import "github.com/docker/docker/errdefs" import ( "net/http" - - "github.com/sirupsen/logrus" ) // FromStatusCode creates an errdef error, based on the provided HTTP status-code func FromStatusCode(err error, statusCode int) error { if err == nil { - return err + return nil } switch statusCode { case http.StatusNotFound: @@ -33,11 +31,6 @@ func FromStatusCode(err error, statusCode int) error { err = System(err) } default: - logrus.WithError(err).WithFields(logrus.Fields{ - "module": "api", - "status_code": statusCode, - }).Debug("FIXME: Got an status-code for which error does not match any expected type!!!") - switch { case statusCode >= 200 && statusCode < 400: // it's a client error diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 5e6310fdcd..7df039b4cf 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -91,3 +91,12 @@ func GetConfigHome() (string, error) { } return filepath.Join(home, ".config"), nil } + +// GetLibHome returns $HOME/.local/lib +func GetLibHome() (string, error) { + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get HOME") + } + return filepath.Join(home, ".local/lib"), nil +} diff --git 
a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index fc48e674c1..11f1bec985 100644 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -26,3 +26,8 @@ func GetDataHome() (string, error) { func GetConfigHome() (string, error) { return "", errors.New("homedir.GetConfigHome() is not supported on this system") } + +// GetLibHome is unsupported on non-Linux systems. +func GetLibHome() (string, error) { + return "", errors.New("homedir.GetLibHome() is not supported on this system") +} diff --git a/vendor/github.com/emirpasic/gods/LICENSE b/vendor/github.com/emirpasic/gods/LICENSE new file mode 100644 index 0000000000..e5e449b6ec --- /dev/null +++ b/vendor/github.com/emirpasic/gods/LICENSE @@ -0,0 +1,41 @@ +Copyright (c) 2015, Emir Pasic +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------- + +AVL Tree: + +Copyright (c) 2017 Benjamin Scher Purcell + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/emirpasic/gods/containers/containers.go b/vendor/github.com/emirpasic/gods/containers/containers.go new file mode 100644 index 0000000000..c35ab36d2c --- /dev/null +++ b/vendor/github.com/emirpasic/gods/containers/containers.go @@ -0,0 +1,35 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// Package containers provides core interfaces and functions for data structures. +// +// Container is the base interface for all data structures to implement. +// +// Iterators provide stateful iterators. +// +// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions. +// +// Serialization provides serializers (marshalers) and deserializers (unmarshalers). +package containers + +import "github.com/emirpasic/gods/utils" + +// Container is the base interface that all data structures implement. +type Container interface { + Empty() bool + Size() int + Clear() + Values() []interface{} +} + +// GetSortedValues returns the container's elements sorted with respect to the passed comparator. +// Does not affect the ordering of elements within the container. +func GetSortedValues(container Container, comparator utils.Comparator) []interface{} { + values := container.Values() + if len(values) < 2 { + return values + } + utils.Sort(values, comparator) + return values +} diff --git a/vendor/github.com/emirpasic/gods/containers/enumerable.go b/vendor/github.com/emirpasic/gods/containers/enumerable.go new file mode 100644 index 0000000000..ac48b54531 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/containers/enumerable.go @@ -0,0 +1,61 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package containers + +// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index. +type EnumerableWithIndex interface { + // Each calls the given function once for each element, passing that element's index and value. + Each(func(index int, value interface{})) + + // Map invokes the given function once for each element and returns a + // container containing the values returned by the given function. + // TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining) + // Map(func(index int, value interface{}) interface{}) Container + + // Select returns a new container containing all elements for which the given function returns a true value. + // TODO need help on how to enforce this in containers (don't want to type assert when chaining) + // Select(func(index int, value interface{}) bool) Container + + // Any passes each element of the container to the given function and + // returns true if the function ever returns true for any element. + Any(func(index int, value interface{}) bool) bool + + // All passes each element of the container to the given function and + // returns true if the function returns true for all elements. + All(func(index int, value interface{}) bool) bool + + // Find passes each element of the container to the given function and returns + // the first (index,value) for which the function is true or -1,nil otherwise + // if no element matches the criteria. + Find(func(index int, value interface{}) bool) (int, interface{}) +} + +// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs. +type EnumerableWithKey interface { + // Each calls the given function once for each element, passing that element's key and value. + Each(func(key interface{}, value interface{})) + + // Map invokes the given function once for each element and returns a container + // containing the values returned by the given function as key/value pairs.
+ // TODO need help on how to enforce this in containers (don't want to type assert when chaining) + // Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container + + // Select returns a new container containing all elements for which the given function returns a true value. + // TODO need help on how to enforce this in containers (don't want to type assert when chaining) + // Select(func(key interface{}, value interface{}) bool) Container + + // Any passes each element of the container to the given function and + // returns true if the function ever returns true for any element. + Any(func(key interface{}, value interface{}) bool) bool + + // All passes each element of the container to the given function and + // returns true if the function returns true for all elements. + All(func(key interface{}, value interface{}) bool) bool + + // Find passes each element of the container to the given function and returns + // the first (key,value) for which the function is true or nil,nil otherwise if no element + // matches the criteria. + Find(func(key interface{}, value interface{}) bool) (interface{}, interface{}) +} diff --git a/vendor/github.com/emirpasic/gods/containers/iterator.go b/vendor/github.com/emirpasic/gods/containers/iterator.go new file mode 100644 index 0000000000..f1a52a365a --- /dev/null +++ b/vendor/github.com/emirpasic/gods/containers/iterator.go @@ -0,0 +1,109 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package containers + +// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index. +type IteratorWithIndex interface { + // Next moves the iterator to the next element and returns true if there was a next element in the container. + // If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). + // If Next() was called for the first time, then it will point the iterator to the first element if it exists. + // Modifies the state of the iterator. + Next() bool + + // Value returns the current element's value. + // Does not modify the state of the iterator. + Value() interface{} + + // Index returns the current element's index. + // Does not modify the state of the iterator. + Index() int + + // Begin resets the iterator to its initial state (one-before-first). + // Call Next() to fetch the first element if any. + Begin() + + // First moves the iterator to the first element and returns true if there was a first element in the container. + // If First() returns true, then first element's index and value can be retrieved by Index() and Value(). + // Modifies the state of the iterator. + First() bool +} + +// IteratorWithKey is a stateful iterator for ordered containers whose elements are key/value pairs. +type IteratorWithKey interface { + // Next moves the iterator to the next element and returns true if there was a next element in the container. + // If Next() returns true, then next element's key and value can be retrieved by Key() and Value(). + // If Next() was called for the first time, then it will point the iterator to the first element if it exists. + // Modifies the state of the iterator. + Next() bool + + // Value returns the current element's value. + // Does not modify the state of the iterator. + Value() interface{} + + // Key returns the current element's key. + // Does not modify the state of the iterator.
+ Key() interface{} + + // Begin resets the iterator to its initial state (one-before-first). + // Call Next() to fetch the first element if any. + Begin() + + // First moves the iterator to the first element and returns true if there was a first element in the container. + // If First() returns true, then first element's key and value can be retrieved by Key() and Value(). + // Modifies the state of the iterator. + First() bool +} + +// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index. +// +// Essentially it is the same as IteratorWithIndex, but provides additional: +// +// Prev() function to enable traversal in reverse +// +// Last() function to move the iterator to the last element. +// +// End() function to move the iterator past the last element (one-past-the-end). +type ReverseIteratorWithIndex interface { + // Prev moves the iterator to the previous element and returns true if there was a previous element in the container. + // If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). + // Modifies the state of the iterator. + Prev() bool + + // End moves the iterator past the last element (one-past-the-end). + // Call Prev() to fetch the last element if any. + End() + + // Last moves the iterator to the last element and returns true if there was a last element in the container. + // If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). + // Modifies the state of the iterator. + Last() bool + + IteratorWithIndex +} + +// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key/value pairs. +// +// Essentially it is the same as IteratorWithKey, but provides additional: +// +// Prev() function to enable traversal in reverse +// +// Last() function to move the iterator to the last element. +type ReverseIteratorWithKey interface { + // Prev moves the iterator to the previous element and returns true if there was a previous element in the container. + // If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value(). + // Modifies the state of the iterator. + Prev() bool + + // End moves the iterator past the last element (one-past-the-end). + // Call Prev() to fetch the last element if any. + End() + + // Last moves the iterator to the last element and returns true if there was a last element in the container. + // If Last() returns true, then last element's key and value can be retrieved by Key() and Value(). + // Modifies the state of the iterator. + Last() bool + + IteratorWithKey +} diff --git a/vendor/github.com/emirpasic/gods/containers/serialization.go b/vendor/github.com/emirpasic/gods/containers/serialization.go new file mode 100644 index 0000000000..d7c90c83a0 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/containers/serialization.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package containers + +// JSONSerializer provides JSON serialization +type JSONSerializer interface { + // ToJSON outputs the JSON representation of the container's elements. + ToJSON() ([]byte, error) +} + +// JSONDeserializer provides JSON deserialization +type JSONDeserializer interface { + // FromJSON populates the container's elements from the input JSON representation.
+ FromJSON([]byte) error +} diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go new file mode 100644 index 0000000000..bfedac9eef --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go @@ -0,0 +1,228 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package arraylist implements the array list. +// +// Structure is not thread safe. +// +// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29 +package arraylist + +import ( + "fmt" + "strings" + + "github.com/emirpasic/gods/lists" + "github.com/emirpasic/gods/utils" +) + +func assertListImplementation() { + var _ lists.List = (*List)(nil) +} + +// List holds the elements in a slice +type List struct { + elements []interface{} + size int +} + +const ( + growthFactor = float32(2.0) // growth by 100% + shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink) +) + +// New instantiates a new list and adds the passed values, if any, to the list +func New(values ...interface{}) *List { + list := &List{} + if len(values) > 0 { + list.Add(values...) + } + return list +} + +// Add appends a value at the end of the list +func (list *List) Add(values ...interface{}) { + list.growBy(len(values)) + for _, value := range values { + list.elements[list.size] = value + list.size++ + } +} + +// Get returns the element at index. +// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false. +func (list *List) Get(index int) (interface{}, bool) { + + if !list.withinRange(index) { + return nil, false + } + + return list.elements[index], true +} + +// Remove removes the element at the given index from the list. +func (list *List) Remove(index int) { + + if !list.withinRange(index) { + return + } + + list.elements[index] = nil // cleanup reference + copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this) + list.size-- + + list.shrink() +} + +// Contains checks if elements (one or more) are present in the list. +// All elements have to be present in the list for the method to return true. +// Performance: time complexity of O(n^2). +// Returns true if no arguments are passed at all, i.e. the list is always a super-set of the empty set. +func (list *List) Contains(values ...interface{}) bool { + + for _, searchValue := range values { + found := false + for _, element := range list.elements { + if element == searchValue { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Values returns all elements in the list. +func (list *List) Values() []interface{} { + newElements := make([]interface{}, list.size, list.size) + copy(newElements, list.elements[:list.size]) + return newElements +} + +// IndexOf returns the index of the provided element, or -1 if the element is not present. +func (list *List) IndexOf(value interface{}) int { + if list.size == 0 { + return -1 + } + for index, element := range list.elements { + if element == value { + return index + } + } + return -1 +} + +// Empty returns true if list does not contain any elements. +func (list *List) Empty() bool { + return list.size == 0 +} + +// Size returns number of elements within the list. +func (list *List) Size() int { + return list.size +} + +// Clear removes all elements from the list.
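A quick sketch of the List API added above (gods arrives here as a transitive dependency of this vendor bump; the values are illustrative):

	list := arraylist.New("b", "a")
	list.Add("c")
	list.Sort(utils.StringComparator) // a, b, c
	if v, ok := list.Get(0); ok {
		fmt.Println(v) // "a"
	}
	fmt.Println(list.Contains("a", "c"), list.IndexOf("b")) // true 1
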
+func (list *List) Clear() { + list.size = 0 + list.elements = []interface{}{} +} + +// Sort sorts values (in-place) using the given comparator. +func (list *List) Sort(comparator utils.Comparator) { + if len(list.elements) < 2 { + return + } + utils.Sort(list.elements[:list.size], comparator) +} + +// Swap swaps the two values at the specified positions. +func (list *List) Swap(i, j int) { + if list.withinRange(i) && list.withinRange(j) { + list.elements[i], list.elements[j] = list.elements[j], list.elements[i] + } +} + +// Insert inserts values at the specified index position, shifting the value at that position (if any) and any subsequent elements to the right. +// Does not do anything if position is negative or bigger than list's size +// Note: position equal to list's size is valid, i.e. append. +func (list *List) Insert(index int, values ...interface{}) { + + if !list.withinRange(index) { + // Append + if index == list.size { + list.Add(values...) + } + return + } + + l := len(values) + list.growBy(l) + list.size += l + copy(list.elements[index+l:], list.elements[index:list.size-l]) + copy(list.elements[index:], values) +} + +// Set sets the value at the specified index. +// Does not do anything if position is negative or bigger than list's size +// Note: position equal to list's size is valid, i.e. append. +func (list *List) Set(index int, value interface{}) { + + if !list.withinRange(index) { + // Append + if index == list.size { + list.Add(value) + } + return + } + + list.elements[index] = value +} + +// String returns a string representation of the container. +func (list *List) String() string { + str := "ArrayList\n" + values := []string{} + for _, value := range list.elements[:list.size] { + values = append(values, fmt.Sprintf("%v", value)) + } + str += strings.Join(values, ", ") + return str +} + +// Check that the index is within bounds of the list +func (list *List) withinRange(index int) bool { + return index >= 0 && index < list.size +} + +func (list *List) resize(cap int) { + newElements := make([]interface{}, cap, cap) + copy(newElements, list.elements) + list.elements = newElements +} + +// Expand the array if necessary, i.e. capacity will be reached if we add n elements +func (list *List) growBy(n int) { + // When capacity is reached, grow by a factor of growthFactor and add number of elements + currentCapacity := cap(list.elements) + if list.size+n >= currentCapacity { + newCapacity := int(growthFactor * float32(currentCapacity+n)) + list.resize(newCapacity) + } +} + +// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity +func (list *List) shrink() { + if shrinkFactor == 0.0 { + return + } + // Shrink when size is at shrinkFactor * capacity + currentCapacity := cap(list.elements) + if list.size <= int(float32(currentCapacity)*shrinkFactor) { + list.resize(list.size) + } +} diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go new file mode 100644 index 0000000000..b3a8738825 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go @@ -0,0 +1,79 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package arraylist + +import "github.com/emirpasic/gods/containers" + +func assertEnumerableImplementation() { + var _ containers.EnumerableWithIndex = (*List)(nil) +} + +// Each calls the given function once for each element, passing that element's index and value. +func (list *List) Each(f func(index int, value interface{})) { + iterator := list.Iterator() + for iterator.Next() { + f(iterator.Index(), iterator.Value()) + } +} + +// Map invokes the given function once for each element and returns a +// container containing the values returned by the given function. +func (list *List) Map(f func(index int, value interface{}) interface{}) *List { + newList := &List{} + iterator := list.Iterator() + for iterator.Next() { + newList.Add(f(iterator.Index(), iterator.Value())) + } + return newList +} + +// Select returns a new container containing all elements for which the given function returns a true value. +func (list *List) Select(f func(index int, value interface{}) bool) *List { + newList := &List{} + iterator := list.Iterator() + for iterator.Next() { + if f(iterator.Index(), iterator.Value()) { + newList.Add(iterator.Value()) + } + } + return newList +} + +// Any passes each element of the collection to the given function and +// returns true if the function ever returns true for any element. +func (list *List) Any(f func(index int, value interface{}) bool) bool { + iterator := list.Iterator() + for iterator.Next() { + if f(iterator.Index(), iterator.Value()) { + return true + } + } + return false +} + +// All passes each element of the collection to the given function and +// returns true if the function returns true for all elements. +func (list *List) All(f func(index int, value interface{}) bool) bool { + iterator := list.Iterator() + for iterator.Next() { + if !f(iterator.Index(), iterator.Value()) { + return false + } + } + return true +} + +// Find passes each element of the container to the given function and returns +// the first (index,value) for which the function is true or -1,nil otherwise +// if no element matches the criteria. +func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) { + iterator := list.Iterator() + for iterator.Next() { + if f(iterator.Index(), iterator.Value()) { + return iterator.Index(), iterator.Value() + } + } + return -1, nil +} diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go new file mode 100644 index 0000000000..38a93f3a8f --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go @@ -0,0 +1,83 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arraylist + +import "github.com/emirpasic/gods/containers" + +func assertIteratorImplementation() { + var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) +} + +// Iterator holding the iterator's state +type Iterator struct { + list *List + index int +} + +// Iterator returns a stateful iterator whose values can be fetched by an index. +func (list *List) Iterator() Iterator { + return Iterator{list: list, index: -1} +} + +// Next moves the iterator to the next element and returns true if there was a next element in the container. +// If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). 
+// If Next() was called for the first time, then it will point the iterator to the first element if it exists. +// Modifies the state of the iterator. +func (iterator *Iterator) Next() bool { + if iterator.index < iterator.list.size { + iterator.index++ + } + return iterator.list.withinRange(iterator.index) +} + +// Prev moves the iterator to the previous element and returns true if there was a previous element in the container. +// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Prev() bool { + if iterator.index >= 0 { + iterator.index-- + } + return iterator.list.withinRange(iterator.index) +} + +// Value returns the current element's value. +// Does not modify the state of the iterator. +func (iterator *Iterator) Value() interface{} { + return iterator.list.elements[iterator.index] +} + +// Index returns the current element's index. +// Does not modify the state of the iterator. +func (iterator *Iterator) Index() int { + return iterator.index +} + +// Begin resets the iterator to its initial state (one-before-first) +// Call Next() to fetch the first element if any. +func (iterator *Iterator) Begin() { + iterator.index = -1 +} + +// End moves the iterator past the last element (one-past-the-end). +// Call Prev() to fetch the last element if any. +func (iterator *Iterator) End() { + iterator.index = iterator.list.size +} + +// First moves the iterator to the first element and returns true if there was a first element in the container. +// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) First() bool { + iterator.Begin() + return iterator.Next() +} + +// Last moves the iterator to the last element and returns true if there was a last element in the container. +// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Last() bool { + iterator.End() + return iterator.Prev() +} diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go new file mode 100644 index 0000000000..2f283fb97d --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go @@ -0,0 +1,29 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package arraylist + +import ( + "encoding/json" + "github.com/emirpasic/gods/containers" +) + +func assertSerializationImplementation() { + var _ containers.JSONSerializer = (*List)(nil) + var _ containers.JSONDeserializer = (*List)(nil) +} + +// ToJSON outputs the JSON representation of list's elements. +func (list *List) ToJSON() ([]byte, error) { + return json.Marshal(list.elements[:list.size]) +} + +// FromJSON populates list's elements from the input JSON representation. 
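+// For example (an illustrative sketch, not from the upstream sources), a
+// JSON round trip using ToJSON/FromJSON:
+//
+//	list := arraylist.New("a", "b")
+//	data, _ := list.ToJSON() // data is []byte(`["a","b"]`)
+//	restored := arraylist.New()
+//	_ = restored.FromJSON(data) // restored now holds "a", "b"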
+func (list *List) FromJSON(data []byte) error { + err := json.Unmarshal(data, &list.elements) + if err == nil { + list.size = len(list.elements) + } + return err +} diff --git a/vendor/github.com/emirpasic/gods/lists/lists.go b/vendor/github.com/emirpasic/gods/lists/lists.go new file mode 100644 index 0000000000..1f6bb08e94 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/lists.go @@ -0,0 +1,33 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lists provides an abstract List interface. +// +// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item. +// +// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29 +package lists + +import ( + "github.com/emirpasic/gods/containers" + "github.com/emirpasic/gods/utils" +) + +// List interface that all lists implement +type List interface { + Get(index int) (interface{}, bool) + Remove(index int) + Add(values ...interface{}) + Contains(values ...interface{}) bool + Sort(comparator utils.Comparator) + Swap(index1, index2 int) + Insert(index int, values ...interface{}) + Set(index int, value interface{}) + + containers.Container + // Empty() bool + // Size() int + // Clear() + // Values() []interface{} +} diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go new file mode 100644 index 0000000000..70b28cf52d --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go @@ -0,0 +1,163 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package binaryheap implements a binary heap backed by array list. +// +// Comparator defines this heap as either min or max heap. +// +// Structure is not thread safe. +// +// References: http://en.wikipedia.org/wiki/Binary_heap +package binaryheap + +import ( + "fmt" + "github.com/emirpasic/gods/lists/arraylist" + "github.com/emirpasic/gods/trees" + "github.com/emirpasic/gods/utils" + "strings" +) + +func assertTreeImplementation() { + var _ trees.Tree = (*Heap)(nil) +} + +// Heap holds elements in an array-list +type Heap struct { + list *arraylist.List + Comparator utils.Comparator +} + +// NewWith instantiates a new empty heap tree with the custom comparator. +func NewWith(comparator utils.Comparator) *Heap { + return &Heap{list: arraylist.New(), Comparator: comparator} +} + +// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int. +func NewWithIntComparator() *Heap { + return &Heap{list: arraylist.New(), Comparator: utils.IntComparator} +} + +// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string. +func NewWithStringComparator() *Heap { + return &Heap{list: arraylist.New(), Comparator: utils.StringComparator} +} + +// Push adds a value onto the heap and bubbles it up accordingly. 
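+// For example (an illustrative sketch; a min-heap via NewWithIntComparator):
+//
+//	heap := binaryheap.NewWithIntComparator()
+//	heap.Push(3, 1, 2)
+//	v, _ := heap.Pop() // v is 1, the minimum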
+func (heap *Heap) Push(values ...interface{}) { + if len(values) == 1 { + heap.list.Add(values[0]) + heap.bubbleUp() + } else { + // Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap + for _, value := range values { + heap.list.Add(value) + } + size := heap.list.Size()/2 + 1 + for i := size; i >= 0; i-- { + heap.bubbleDownIndex(i) + } + } +} + +// Pop removes top element on heap and returns it, or nil if heap is empty. +// Second return parameter is true, unless the heap was empty and there was nothing to pop. +func (heap *Heap) Pop() (value interface{}, ok bool) { + value, ok = heap.list.Get(0) + if !ok { + return + } + lastIndex := heap.list.Size() - 1 + heap.list.Swap(0, lastIndex) + heap.list.Remove(lastIndex) + heap.bubbleDown() + return +} + +// Peek returns top element on the heap without removing it, or nil if heap is empty. +// Second return parameter is true, unless the heap was empty and there was nothing to peek. +func (heap *Heap) Peek() (value interface{}, ok bool) { + return heap.list.Get(0) +} + +// Empty returns true if heap does not contain any elements. +func (heap *Heap) Empty() bool { + return heap.list.Empty() +} + +// Size returns number of elements within the heap. +func (heap *Heap) Size() int { + return heap.list.Size() +} + +// Clear removes all elements from the heap. +func (heap *Heap) Clear() { + heap.list.Clear() +} + +// Values returns all elements in the heap. +func (heap *Heap) Values() []interface{} { + return heap.list.Values() +} + +// String returns a string representation of container +func (heap *Heap) String() string { + str := "BinaryHeap\n" + values := []string{} + for _, value := range heap.list.Values() { + values = append(values, fmt.Sprintf("%v", value)) + } + str += strings.Join(values, ", ") + return str +} + +// Performs the "bubble down" operation. This is to place the element that is at the root +// of the heap in its correct place so that the heap maintains the min/max-heap order property. +func (heap *Heap) bubbleDown() { + heap.bubbleDownIndex(0) +} + +// Performs the "bubble down" operation. This is to place the element that is at the index +// of the heap in its correct place so that the heap maintains the min/max-heap order property. +func (heap *Heap) bubbleDownIndex(index int) { + size := heap.list.Size() + for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 { + rightIndex := index<<1 + 2 + smallerIndex := leftIndex + leftValue, _ := heap.list.Get(leftIndex) + rightValue, _ := heap.list.Get(rightIndex) + if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 { + smallerIndex = rightIndex + } + indexValue, _ := heap.list.Get(index) + smallerValue, _ := heap.list.Get(smallerIndex) + if heap.Comparator(indexValue, smallerValue) > 0 { + heap.list.Swap(index, smallerIndex) + } else { + break + } + index = smallerIndex + } +} + +// Performs the "bubble up" operation. This is to place a newly inserted +// element (i.e. last element in the list) in its correct place so that +// the heap maintains the min/max-heap order property. 
+func (heap *Heap) bubbleUp() { + index := heap.list.Size() - 1 + for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 { + indexValue, _ := heap.list.Get(index) + parentValue, _ := heap.list.Get(parentIndex) + if heap.Comparator(parentValue, indexValue) <= 0 { + break + } + heap.list.Swap(index, parentIndex) + index = parentIndex + } +} + +// Check that the index is within bounds of the list +func (heap *Heap) withinRange(index int) bool { + return index >= 0 && index < heap.list.Size() +} diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go new file mode 100644 index 0000000000..beeb8d7013 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go @@ -0,0 +1,84 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package binaryheap + +import "github.com/emirpasic/gods/containers" + +func assertIteratorImplementation() { + var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) +} + +// Iterator returns a stateful iterator whose values can be fetched by an index. +type Iterator struct { + heap *Heap + index int +} + +// Iterator returns a stateful iterator whose values can be fetched by an index. +func (heap *Heap) Iterator() Iterator { + return Iterator{heap: heap, index: -1} +} + +// Next moves the iterator to the next element and returns true if there was a next element in the container. +// If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). +// If Next() was called for the first time, then it will point the iterator to the first element if it exists. +// Modifies the state of the iterator. +func (iterator *Iterator) Next() bool { + if iterator.index < iterator.heap.Size() { + iterator.index++ + } + return iterator.heap.withinRange(iterator.index) +} + +// Prev moves the iterator to the previous element and returns true if there was a previous element in the container. +// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Prev() bool { + if iterator.index >= 0 { + iterator.index-- + } + return iterator.heap.withinRange(iterator.index) +} + +// Value returns the current element's value. +// Does not modify the state of the iterator. +func (iterator *Iterator) Value() interface{} { + value, _ := iterator.heap.list.Get(iterator.index) + return value +} + +// Index returns the current element's index. +// Does not modify the state of the iterator. +func (iterator *Iterator) Index() int { + return iterator.index +} + +// Begin resets the iterator to its initial state (one-before-first) +// Call Next() to fetch the first element if any. +func (iterator *Iterator) Begin() { + iterator.index = -1 +} + +// End moves the iterator past the last element (one-past-the-end). +// Call Prev() to fetch the last element if any. +func (iterator *Iterator) End() { + iterator.index = iterator.heap.Size() +} + +// First moves the iterator to the first element and returns true if there was a first element in the container. +// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. 
+func (iterator *Iterator) First() bool { + iterator.Begin() + return iterator.Next() +} + +// Last moves the iterator to the last element and returns true if there was a last element in the container. +// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Last() bool { + iterator.End() + return iterator.Prev() +} diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go new file mode 100644 index 0000000000..00d0c7719c --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go @@ -0,0 +1,22 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package binaryheap + +import "github.com/emirpasic/gods/containers" + +func assertSerializationImplementation() { + var _ containers.JSONSerializer = (*Heap)(nil) + var _ containers.JSONDeserializer = (*Heap)(nil) +} + +// ToJSON outputs the JSON representation of the heap. +func (heap *Heap) ToJSON() ([]byte, error) { + return heap.list.ToJSON() +} + +// FromJSON populates the heap from the input JSON representation. +func (heap *Heap) FromJSON(data []byte) error { + return heap.list.FromJSON(data) +} diff --git a/vendor/github.com/emirpasic/gods/trees/trees.go b/vendor/github.com/emirpasic/gods/trees/trees.go new file mode 100644 index 0000000000..a5a7427d34 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/trees.go @@ -0,0 +1,21 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package trees provides an abstract Tree interface. +// +// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes. +// +// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29 +package trees + +import "github.com/emirpasic/gods/containers" + +// Tree interface that all trees implement +type Tree interface { + containers.Container + // Empty() bool + // Size() int + // Clear() + // Values() []interface{} +} diff --git a/vendor/github.com/emirpasic/gods/utils/comparator.go b/vendor/github.com/emirpasic/gods/utils/comparator.go new file mode 100644 index 0000000000..6a9afbf346 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/utils/comparator.go @@ -0,0 +1,251 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "time" + +// Comparator will make type assertion (see IntComparator for example), +// which will panic if a or b are not of the asserted type. 
+// +// Should return a number: +// negative , if a < b +// zero , if a == b +// positive , if a > b +type Comparator func(a, b interface{}) int + +// StringComparator provides a fast comparison on strings +func StringComparator(a, b interface{}) int { + s1 := a.(string) + s2 := b.(string) + min := len(s2) + if len(s1) < len(s2) { + min = len(s1) + } + diff := 0 + for i := 0; i < min && diff == 0; i++ { + diff = int(s1[i]) - int(s2[i]) + } + if diff == 0 { + diff = len(s1) - len(s2) + } + if diff < 0 { + return -1 + } + if diff > 0 { + return 1 + } + return 0 +} + +// IntComparator provides a basic comparison on int +func IntComparator(a, b interface{}) int { + aAsserted := a.(int) + bAsserted := b.(int) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int8Comparator provides a basic comparison on int8 +func Int8Comparator(a, b interface{}) int { + aAsserted := a.(int8) + bAsserted := b.(int8) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int16Comparator provides a basic comparison on int16 +func Int16Comparator(a, b interface{}) int { + aAsserted := a.(int16) + bAsserted := b.(int16) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int32Comparator provides a basic comparison on int32 +func Int32Comparator(a, b interface{}) int { + aAsserted := a.(int32) + bAsserted := b.(int32) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int64Comparator provides a basic comparison on int64 +func Int64Comparator(a, b interface{}) int { + aAsserted := a.(int64) + bAsserted := b.(int64) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UIntComparator provides a basic comparison on uint +func UIntComparator(a, b interface{}) int { + aAsserted := a.(uint) + bAsserted := b.(uint) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt8Comparator provides a basic comparison on uint8 +func UInt8Comparator(a, b interface{}) int { + aAsserted := a.(uint8) + bAsserted := b.(uint8) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt16Comparator provides a basic comparison on uint16 +func UInt16Comparator(a, b interface{}) int { + aAsserted := a.(uint16) + bAsserted := b.(uint16) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt32Comparator provides a basic comparison on uint32 +func UInt32Comparator(a, b interface{}) int { + aAsserted := a.(uint32) + bAsserted := b.(uint32) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt64Comparator provides a basic comparison on uint64 +func UInt64Comparator(a, b interface{}) int { + aAsserted := a.(uint64) + bAsserted := b.(uint64) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Float32Comparator provides a basic comparison on float32 +func Float32Comparator(a, b interface{}) int { + aAsserted := a.(float32) + bAsserted := b.(float32) + switch { + case aAsserted > bAsserted: + 
return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Float64Comparator provides a basic comparison on float64 +func Float64Comparator(a, b interface{}) int { + aAsserted := a.(float64) + bAsserted := b.(float64) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// ByteComparator provides a basic comparison on byte +func ByteComparator(a, b interface{}) int { + aAsserted := a.(byte) + bAsserted := b.(byte) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// RuneComparator provides a basic comparison on rune +func RuneComparator(a, b interface{}) int { + aAsserted := a.(rune) + bAsserted := b.(rune) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// TimeComparator provides a basic comparison on time.Time +func TimeComparator(a, b interface{}) int { + aAsserted := a.(time.Time) + bAsserted := b.(time.Time) + + switch { + case aAsserted.After(bAsserted): + return 1 + case aAsserted.Before(bAsserted): + return -1 + default: + return 0 + } +} diff --git a/vendor/github.com/emirpasic/gods/utils/sort.go b/vendor/github.com/emirpasic/gods/utils/sort.go new file mode 100644 index 0000000000..79ced1f5d2 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/utils/sort.go @@ -0,0 +1,29 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "sort" + +// Sort sorts values (in-place) with respect to the given comparator. +// +// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices). +func Sort(values []interface{}, comparator Comparator) { + sort.Sort(sortable{values, comparator}) +} + +type sortable struct { + values []interface{} + comparator Comparator +} + +func (s sortable) Len() int { + return len(s.values) +} +func (s sortable) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] +} +func (s sortable) Less(i, j int) bool { + return s.comparator(s.values[i], s.values[j]) < 0 +} diff --git a/vendor/github.com/emirpasic/gods/utils/utils.go b/vendor/github.com/emirpasic/gods/utils/utils.go new file mode 100644 index 0000000000..1ad49cbc07 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/utils/utils.go @@ -0,0 +1,47 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package utils provides common utility functions. +// +// Provided functionalities: +// - sorting +// - comparators +package utils + +import ( + "fmt" + "strconv" +) + +// ToString converts a value to string. 
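+// For example (illustrative): ToString(int64(42)) returns "42",
+// ToString(1.5) returns "1.5", and ToString(true) returns "true".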
+func ToString(value interface{}) string { + switch value.(type) { + case string: + return value.(string) + case int8: + return strconv.FormatInt(int64(value.(int8)), 10) + case int16: + return strconv.FormatInt(int64(value.(int16)), 10) + case int32: + return strconv.FormatInt(int64(value.(int32)), 10) + case int64: + return strconv.FormatInt(int64(value.(int64)), 10) + case uint8: + return strconv.FormatUint(uint64(value.(uint8)), 10) + case uint16: + return strconv.FormatUint(uint64(value.(uint16)), 10) + case uint32: + return strconv.FormatUint(uint64(value.(uint32)), 10) + case uint64: + return strconv.FormatUint(uint64(value.(uint64)), 10) + case float32: + return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64) + case float64: + return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64) + case bool: + return strconv.FormatBool(value.(bool)) + default: + return fmt.Sprintf("%+v", value) + } +} diff --git a/vendor/github.com/go-git/gcfg/LICENSE b/vendor/github.com/go-git/gcfg/LICENSE new file mode 100644 index 0000000000..87a5cede33 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-git/gcfg/README b/vendor/github.com/go-git/gcfg/README new file mode 100644 index 0000000000..1ff233a529 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/README @@ -0,0 +1,4 @@ +Gcfg reads INI-style configuration files into Go structs; +supports user-defined types and subsections. + +Package docs: https://godoc.org/gopkg.in/gcfg.v1 diff --git a/vendor/github.com/go-git/gcfg/doc.go b/vendor/github.com/go-git/gcfg/doc.go new file mode 100644 index 0000000000..7bdefbf020 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/doc.go @@ -0,0 +1,145 @@ +// Package gcfg reads "INI-style" text-based configuration files with +// "name=value" pairs grouped into sections (gcfg files). +// +// This package is still a work in progress; see the sections below for planned +// changes. 
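+//
+// For orientation, a minimal round trip (an illustrative sketch; the struct
+// and config text below are not from the original docs):
+//
+//	var cfg struct {
+//		Section struct{ Name string }
+//	}
+//	err := gcfg.ReadStringInto(&cfg, "[section]\nname=value")
+//	// on success, cfg.Section.Name == "value"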
+//
+// Syntax
+//
+// The syntax is based on that used by git config:
+// http://git-scm.com/docs/git-config#_syntax .
+// There are some (planned) differences compared to the git config format:
+//  - improve data portability:
+//    - must be encoded in UTF-8 (for now) and must not contain the 0 byte
+//    - the include directive and "path" type are not supported
+//      (path type may be implementable as a user-defined type)
+//  - internationalization
+//    - section and variable names can contain unicode letters, unicode digits
+//      (as defined in http://golang.org/ref/spec#Characters ) and hyphens
+//      (U+002D), starting with a unicode letter
+//  - disallow potentially ambiguous or misleading definitions:
+//    - `[sec.sub]` format is not allowed (deprecated in gitconfig)
+//    - `[sec ""]` is not allowed
+//      - use `[sec]` for section name "sec" and empty subsection name
+//    - (planned) within a single file, definitions must be contiguous for each:
+//      - section: '[secA]' -> '[secB]' -> '[secA]' is an error
+//      - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
+//      - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
+//
+// Data structure
+//
+// The functions in this package read values into a user-defined struct.
+// Each section corresponds to a struct field in the config struct, and each
+// variable in a section corresponds to a data field in the section struct.
+// The mapping of each section or variable name to fields is done either based
+// on the "gcfg" struct tag or by matching the name of the section or variable,
+// ignoring case. In the latter case, hyphens '-' in section and variable names
+// correspond to underscores '_' in field names.
+// Fields must be exported; to use a section or variable name starting with a
+// letter that is neither upper- or lower-case, prefix the field name with 'X'.
+// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
+//
+// For sections with subsections, the corresponding field in config must be a
+// map, rather than a struct, with string keys and pointer-to-struct values.
+// Values for subsection variables are stored in the map with the subsection
+// name used as the map key.
+// (Note that unlike section and variable names, subsection names are case
+// sensitive.)
+// When using a map, and there is a section with the same section name but
+// without a subsection name, its values are stored with the empty string used
+// as the key.
+// It is possible to provide default values for subsections in the section
+// "default-<sectionname>" (or by setting values in the corresponding struct
+// field "Default_<sectionname>").
+//
+// The functions in this package panic if config is not a pointer to a struct,
+// or when a field is not of a suitable type (either a struct or a map with
+// string keys and pointer-to-struct values).
+//
+// Parsing of values
+//
+// The section structs in the config struct may contain single-valued or
+// multi-valued variables. Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-value; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
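+// For instance (an illustrative sketch, not from the original docs), a
+// hex-color type could implement it as:
+//
+//	type Color struct{ R, G, B uint8 }
+//
+//	func (c *Color) UnmarshalText(text []byte) error {
+//		_, err := fmt.Sscanf(string(text), "%2x%2x%2x", &c.R, &c.G, &c.B)
+//		return err
+//	}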
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (if prefixed with '0x'). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively.)
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is, any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// Error handling
+//
+// There are 3 types of errors:
+//
+//  - programmer errors / panics:
+//    - invalid configuration structure
+//  - data errors:
+//    - fatal errors:
+//      - invalid configuration syntax
+//    - warnings:
+//      - data that doesn't belong to any part of the config structure
+//
+// Programmer errors trigger panics. These should be fixed by the programmer
+// before releasing code that uses gcfg.
+//
+// Data errors cause gcfg to return a non-nil error value. This includes the
+// case when there are extra unknown key-value definitions in the configuration
+// data (extra data).
+// However, on some occasions it is desirable to be able to proceed in
+// situations when the only data error is that of extra data.
+// These errors are handled at a different (warning) priority and can be
+// filtered out programmatically. To ignore extra data warnings, wrap the
+// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+//  - documentation
+//    - self-contained syntax documentation
+//    - more practical examples
+//    - move TODOs to issue tracker (eventually)
+//  - syntax
+//    - reconsider valid escape sequences
+//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+//  - reading / parsing gcfg files
+//    - define internal representation structure
+//    - support multiple inputs (readers, strings, files)
+//    - support declaring encoding (?)
+//    - support varying fields sets for subsections (?)
+//  - writing gcfg files
+//  - error handling
+//    - make error context accessible programmatically?
+//    - limit input size?
+// +package gcfg // import "github.com/go-git/gcfg" diff --git a/vendor/github.com/go-git/gcfg/errors.go b/vendor/github.com/go-git/gcfg/errors.go new file mode 100644 index 0000000000..853c76021d --- /dev/null +++ b/vendor/github.com/go-git/gcfg/errors.go @@ -0,0 +1,41 @@ +package gcfg + +import ( + "gopkg.in/warnings.v0" +) + +// FatalOnly filters the results of a Read*Into invocation and returns only +// fatal errors. That is, errors (warnings) indicating data for unknown +// sections / variables is ignored. Example invocation: +// +// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile)) +// if err != nil { +// ... +// +func FatalOnly(err error) error { + return warnings.FatalOnly(err) +} + +func isFatal(err error) bool { + _, ok := err.(extraData) + return !ok +} + +type extraData struct { + section string + subsection *string + variable *string +} + +func (e extraData) Error() string { + s := "can't store data at section \"" + e.section + "\"" + if e.subsection != nil { + s += ", subsection \"" + *e.subsection + "\"" + } + if e.variable != nil { + s += ", variable \"" + *e.variable + "\"" + } + return s +} + +var _ error = extraData{} diff --git a/vendor/github.com/go-git/gcfg/go1_0.go b/vendor/github.com/go-git/gcfg/go1_0.go new file mode 100644 index 0000000000..6670210791 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/go1_0.go @@ -0,0 +1,7 @@ +// +build !go1.2 + +package gcfg + +type textUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/go-git/gcfg/go1_2.go b/vendor/github.com/go-git/gcfg/go1_2.go new file mode 100644 index 0000000000..6f5843bc7c --- /dev/null +++ b/vendor/github.com/go-git/gcfg/go1_2.go @@ -0,0 +1,9 @@ +// +build go1.2 + +package gcfg + +import ( + "encoding" +) + +type textUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/go-git/gcfg/read.go b/vendor/github.com/go-git/gcfg/read.go new file mode 100644 index 0000000000..4dfdc5cf30 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/read.go @@ -0,0 +1,273 @@ +package gcfg + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/go-git/gcfg/scanner" + "github.com/go-git/gcfg/token" + "gopkg.in/warnings.v0" +) + +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'} + +// no error: invalid literals should be caught by scanner +func unquote(s string) string { + u, q, esc := make([]rune, 0, len(s)), false, false + for _, c := range s { + if esc { + uc, ok := unescape[c] + switch { + case ok: + u = append(u, uc) + fallthrough + case !q && c == '\n': + esc = false + continue + } + panic("invalid escape sequence") + } + switch c { + case '"': + q = !q + case '\\': + esc = true + default: + u = append(u, c) + } + } + if q { + panic("missing end quote") + } + if esc { + panic("invalid escape sequence") + } + return string(u) +} + +func read(c *warnings.Collector, callback func(string, string, string, string, bool) error, + fset *token.FileSet, file *token.File, src []byte) error { + // + var s scanner.Scanner + var errs scanner.ErrorList + s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) + sect, sectsub := "", "" + pos, tok, lit := s.Scan() + errfn := func(msg string) error { + return fmt.Errorf("%s: %s", fset.Position(pos), msg) + } + for { + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + switch tok { + case token.EOF: + return nil + case token.EOL, token.COMMENT: + pos, tok, lit = s.Scan() + case token.LBRACK: + pos, tok, lit = s.Scan() + 
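+			// the token after '[' should be the section name; collect any
+			// scanner errors before interpreting it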
if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.IDENT { + if err := c.Collect(errfn("expected section name")); err != nil { + return err + } + } + sect, sectsub = lit, "" + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok == token.STRING { + sectsub = unquote(lit) + if sectsub == "" { + if err := c.Collect(errfn("empty subsection name")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + } + if tok != token.RBRACK { + if sectsub == "" { + if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected right bracket")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + // If a section/subsection header was found, ensure a + // container object is created, even if there are no + // variables further down. + err := c.Collect(callback(sect, sectsub, "", "", true)) + if err != nil { + return err + } + case token.IDENT: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + n := lit + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" + if !blank { + if tok != token.ASSIGN { + if err := c.Collect(errfn("expected '='")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.STRING { + if err := c.Collect(errfn("expected value")); err != nil { + return err + } + } + v = unquote(lit) + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + } + err := c.Collect(callback(sect, sectsub, n, v, blank)) + if err != nil { + return err + } + default: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { + return err + } + } + } + panic("never reached") +} + +func readInto(config interface{}, fset *token.FileSet, file *token.File, + src []byte) error { + // + c := warnings.NewCollector(isFatal) + firstPassCallback := func(s string, ss string, k string, v string, bv bool) error { + return set(c, config, s, ss, k, v, bv, false) + } + err := read(c, firstPassCallback, fset, file, src) + if err != nil { + return err + } + secondPassCallback := func(s string, ss string, k string, v string, bv bool) error { + return set(c, config, s, ss, k, v, bv, true) + } + err = read(c, secondPassCallback, fset, file, src) + if err != nil { + return err + } + return c.Done() +} + +// ReadWithCallback reads gcfg formatted data from reader and calls +// callback with each section and option found. +// +// Callback is called with section, subsection, option key, option value +// and blank value flag as arguments. 
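+//
+// For example (an illustrative sketch):
+//
+//	err := gcfg.ReadWithCallback(reader, func(sect, subsect, name, value string, blank bool) error {
+//		fmt.Printf("[%s %q] %s=%s (blank=%v)\n", sect, subsect, name, value, blank)
+//		return nil
+//	})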
+//
+// When a section is found, callback is called with an empty subsection, option
+// key and option value.
+//
+// When a subsection is found, callback is called with an empty option key and
+// option value.
+//
+// If blank value flag is true, it means that the value was not set for an option
+// (as opposed to set to empty string).
+//
+// If callback returns an error, ReadWithCallback terminates with an error too.
+func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error {
+	src, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return err
+	}
+
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(src))
+	c := warnings.NewCollector(isFatal)
+
+	return read(c, callback, fset, file, src)
+}
+
+// ReadInto reads gcfg formatted data from reader and sets the values into the
+// corresponding fields in config.
+func ReadInto(config interface{}, reader io.Reader) error {
+	src, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return err
+	}
+	fset := token.NewFileSet()
+	file := fset.AddFile("", fset.Base(), len(src))
+	return readInto(config, fset, file, src)
+}
+
+// ReadStringInto reads gcfg formatted data from str and sets the values into
+// the corresponding fields in config.
+func ReadStringInto(config interface{}, str string) error {
+	r := strings.NewReader(str)
+	return ReadInto(config, r)
+}
+
+// ReadFileInto reads gcfg formatted data from the file filename and sets the
+// values into the corresponding fields in config.
+func ReadFileInto(config interface{}, filename string) error {
+	f, err := os.Open(filename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	src, err := ioutil.ReadAll(f)
+	if err != nil {
+		return err
+	}
+	fset := token.NewFileSet()
+	file := fset.AddFile(filename, fset.Base(), len(src))
+	return readInto(config, fset, file, src)
+}
diff --git a/vendor/github.com/go-git/gcfg/scanner/errors.go b/vendor/github.com/go-git/gcfg/scanner/errors.go
new file mode 100644
index 0000000000..a6e00f5c64
--- /dev/null
+++ b/vendor/github.com/go-git/gcfg/scanner/errors.go
@@ -0,0 +1,121 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+	"fmt"
+	"io"
+	"sort"
+)
+
+import (
+	"github.com/go-git/gcfg/token"
+)
+
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
+//
+type Error struct {
+	Pos token.Position
+	Msg string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Pos.Filename != "" || e.Pos.IsValid() {
+		// don't print "<unknown position>"
+		// TODO(gri) reconsider the semantics of Position.IsValid
+		return e.Pos.String() + ": " + e.Msg
+	}
+	return e.Msg
+}
+
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+//
+type ErrorList []*Error
+
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+	*p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] }
+
+// ErrorList implements the sort Interface.
+func (p ErrorList) Len() int { return len(p) } +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p ErrorList) Less(i, j int) bool { + e := &p[i].Pos + f := &p[j].Pos + if e.Filename < f.Filename { + return true + } + if e.Filename == f.Filename { + return e.Offset < f.Offset + } + return false +} + +// Sort sorts an ErrorList. *Error entries are sorted by position, +// other errors are sorted by error message, and before any *Error +// entry. +// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. +func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/github.com/go-git/gcfg/scanner/scanner.go b/vendor/github.com/go-git/gcfg/scanner/scanner.go new file mode 100644 index 0000000000..41aafec758 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/scanner/scanner.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner implements a scanner for gcfg configuration text. +// It takes a []byte as source which can then be tokenized +// through repeated calls to the Scan method. +// +// Note that the API for the scanner package may change to accommodate new +// features or implementation changes in gcfg. +// +package scanner + +import ( + "fmt" + "path/filepath" + "unicode" + "unicode/utf8" +) + +import ( + "github.com/go-git/gcfg/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. 
+// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + nextVal bool // next token is expected to be a value + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. +// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= 0x80: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. 
+ if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.ErrorCount = 0 + s.nextVal = false + + s.next() +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial [;#] already consumed + offs := s.offset - 1 // position of initial [;#] + + for s.ch != '\n' && s.ch >= 0 { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanEscape(val bool) { + offs := s.offset + ch := s.ch + s.next() // always make progress + switch ch { + case '\\', '"': + // ok + case 'n', 't', 'b': + if val { + break // ok + } + fallthrough + default: + s.error(offs, "unknown escape sequence") + } +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for s.ch != '"' { + ch := s.ch + s.next() + if ch == '\n' || ch < 0 { + s.error(offs, "string not terminated") + break + } + if ch == '\\' { + s.scanEscape(false) + } + } + + s.next() + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanValString() string { + offs := s.offset + + hasCR := false + end := offs + inQuote := false +loop: + for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { + ch := s.ch + s.next() + switch { + case inQuote && ch == '\\': + s.scanEscape(true) + case !inQuote && ch == '\\': + if s.ch == '\r' { + hasCR = true + s.next() + } + if s.ch != '\n' { + s.scanEscape(true) + } else { + s.next() + } + case ch == '"': + inQuote = !inQuote + case ch == '\r': + hasCR = true + case ch < 0 || inQuote && ch == '\n': + s.error(offs, "string not terminated") + break loop + } + if inQuote || !isWhiteSpace(ch) { + end = s.offset + } + } + + lit := s.src[offs:end] + if hasCR { + lit = stripCR(lit) + } + + return string(lit) +} + +func isWhiteSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\r' +} + +func (s *Scanner) skipWhitespace() { + for isWhiteSpace(s.ch) { + s.next() + } +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.STRING) or +// token.COMMENT, the literal string has the corresponding value. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. 
Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. +// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + switch ch := s.ch; { + case s.nextVal: + lit = s.scanValString() + tok = token.STRING + s.nextVal = false + case isLetter(ch): + lit = s.scanIdentifier() + tok = token.IDENT + default: + s.next() // always make progress + switch ch { + case -1: + tok = token.EOF + case '\n': + tok = token.EOL + case '"': + tok = token.STRING + lit = s.scanString() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case ';', '#': + // comment + lit = s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + goto scanAgain + } + tok = token.COMMENT + case '=': + tok = token.ASSIGN + s.nextVal = true + default: + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + tok = token.ILLEGAL + lit = string(ch) + } + } + + return +} diff --git a/vendor/github.com/go-git/gcfg/set.go b/vendor/github.com/go-git/gcfg/set.go new file mode 100644 index 0000000000..e2d9278025 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/set.go @@ -0,0 +1,332 @@ +package gcfg + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/go-git/gcfg/types" + "gopkg.in/warnings.v0" +) + +type tag struct { + ident string + intMode string +} + +func newTag(ts string) tag { + t := tag{} + s := strings.Split(ts, ",") + t.ident = s[0] + for _, tse := range s[1:] { + if strings.HasPrefix(tse, "int=") { + t.intMode = tse[len("int="):] + } + } + return t +} + +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { + var n string + r0, _ := utf8.DecodeRuneInString(name) + if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { + n = "X" + } + n += strings.Replace(name, "-", "_", -1) + f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { + if !v.FieldByName(fieldName).CanSet() { + return false + } + f, _ := v.Type().FieldByName(fieldName) + t := newTag(f.Tag.Get("gcfg")) + if t.ident != "" { + return strings.EqualFold(t.ident, name) + } + return strings.EqualFold(n, fieldName) + }) + if !ok { + return reflect.Value{}, tag{} + } + return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) +} + +type setter func(destp interface{}, blank bool, val string, t tag) error + +var errUnsupportedType = fmt.Errorf("unsupported type") +var errBlankUnsupported = fmt.Errorf("blank value not supported for type") + +var setters = []setter{ + typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, +} + +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { + dtu, ok := d.(textUnmarshaler) + if !ok { + return errUnsupportedType + } + if blank { + return errBlankUnsupported + } + return dtu.UnmarshalText([]byte(val)) +} + +func boolSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) + return nil + } + b, err := types.ParseBool(val) + if err == nil { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) + } + return err +} + +func intMode(mode string) types.IntMode { + var m types.IntMode + if strings.ContainsAny(mode, "dD") { + m |= 
types.Dec + } + if strings.ContainsAny(mode, "hH") { + m |= types.Hex + } + if strings.ContainsAny(mode, "oO") { + m |= types.Oct + } + return m +} + +var typeModes = map[reflect.Type]types.IntMode{ + reflect.TypeOf(int(0)): types.Dec | types.Hex, + reflect.TypeOf(int8(0)): types.Dec | types.Hex, + reflect.TypeOf(int16(0)): types.Dec | types.Hex, + reflect.TypeOf(int32(0)): types.Dec | types.Hex, + reflect.TypeOf(int64(0)): types.Dec | types.Hex, + reflect.TypeOf(uint(0)): types.Dec | types.Hex, + reflect.TypeOf(uint8(0)): types.Dec | types.Hex, + reflect.TypeOf(uint16(0)): types.Dec | types.Hex, + reflect.TypeOf(uint32(0)): types.Dec | types.Hex, + reflect.TypeOf(uint64(0)): types.Dec | types.Hex, + // use default mode (allow dec/hex/oct) for uintptr type + reflect.TypeOf(big.Int{}): types.Dec | types.Hex, +} + +func intModeDefault(t reflect.Type) types.IntMode { + m, ok := typeModes[t] + if !ok { + m = types.Dec | types.Hex | types.Oct + } + return m +} + +func intSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + mode := intMode(t.intMode) + if mode == 0 { + mode = intModeDefault(reflect.TypeOf(d).Elem()) + } + return types.ParseInt(d, val, mode) +} + +func stringSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + dsp, ok := d.(*string) + if !ok { + return errUnsupportedType + } + *dsp = val + return nil +} + +var kindSetters = map[reflect.Kind]setter{ + reflect.String: stringSetter, + reflect.Bool: boolSetter, + reflect.Int: intSetter, + reflect.Int8: intSetter, + reflect.Int16: intSetter, + reflect.Int32: intSetter, + reflect.Int64: intSetter, + reflect.Uint: intSetter, + reflect.Uint8: intSetter, + reflect.Uint16: intSetter, + reflect.Uint32: intSetter, + reflect.Uint64: intSetter, + reflect.Uintptr: intSetter, +} + +var typeSetters = map[reflect.Type]setter{ + reflect.TypeOf(big.Int{}): intSetter, +} + +func typeSetter(d interface{}, blank bool, val string, tt tag) error { + t := reflect.ValueOf(d).Type().Elem() + setter, ok := typeSetters[t] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func kindSetter(d interface{}, blank bool, val string, tt tag) error { + k := reflect.ValueOf(d).Type().Elem().Kind() + setter, ok := kindSetters[k] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func scanSetter(d interface{}, blank bool, val string, tt tag) error { + if blank { + return errBlankUnsupported + } + return types.ScanFully(d, val, 'v') +} + +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, + vType reflect.Type) (reflect.Value, error) { + // + pv := reflect.New(vType) + dfltName := "default-" + sect + dfltField, _ := fieldFold(vCfg, dfltName) + var err error + if dfltField.IsValid() { + b := bytes.NewBuffer(nil) + ge := gob.NewEncoder(b) + if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { + return pv, err + } + gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) + if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { + return pv, err + } + } + return pv, nil +} + +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, + value string, blankValue bool, subsectPass bool) error { + // + vPCfg := reflect.ValueOf(cfg) + if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("config must be a pointer to a struct")) + } + vCfg := vPCfg.Elem() + vSect, _ := fieldFold(vCfg, sect) + if !vSect.IsValid() { + err := 
extraData{section: sect} + return c.Collect(err) + } + isSubsect := vSect.Kind() == reflect.Map + if subsectPass != isSubsect { + return nil + } + if isSubsect { + vst := vSect.Type() + if vst.Key().Kind() != reflect.String || + vst.Elem().Kind() != reflect.Ptr || + vst.Elem().Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("map field for section must have string keys and "+ + " pointer-to-struct values: section %q", sect)) + } + if vSect.IsNil() { + vSect.Set(reflect.MakeMap(vst)) + } + k := reflect.ValueOf(sub) + pv := vSect.MapIndex(k) + if !pv.IsValid() { + vType := vSect.Type().Elem().Elem() + var err error + if pv, err = newValue(c, sect, vCfg, vType); err != nil { + return err + } + vSect.SetMapIndex(k, pv) + } + vSect = pv.Elem() + } else if vSect.Kind() != reflect.Struct { + panic(fmt.Errorf("field for section must be a map or a struct: "+ + "section %q", sect)) + } else if sub != "" { + err := extraData{section: sect, subsection: &sub} + return c.Collect(err) + } + // Empty name is a special value, meaning that only the + // section/subsection object is to be created, with no values set. + if name == "" { + return nil + } + vVar, t := fieldFold(vSect, name) + if !vVar.IsValid() { + var err error + if isSubsect { + err = extraData{section: sect, subsection: &sub, variable: &name} + } else { + err = extraData{section: sect, variable: &name} + } + return c.Collect(err) + } + // vVal is either single-valued var, or newly allocated value within multi-valued var + var vVal reflect.Value + // multi-value if unnamed slice type + isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || + vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice + if isMulti && vVar.Kind() == reflect.Ptr { + if vVar.IsNil() { + vVar.Set(reflect.New(vVar.Type().Elem())) + } + vVar = vVar.Elem() + } + if isMulti && blankValue { + vVar.Set(reflect.Zero(vVar.Type())) + return nil + } + if isMulti { + vVal = reflect.New(vVar.Type().Elem()).Elem() + } else { + vVal = vVar + } + isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr + isNew := isDeref && vVal.IsNil() + // vAddr is address of value to set (dereferenced & allocated as needed) + var vAddr reflect.Value + switch { + case isNew: + vAddr = reflect.New(vVal.Type().Elem()) + case isDeref && !isNew: + vAddr = vVal + default: + vAddr = vVal.Addr() + } + vAddrI := vAddr.Interface() + err, ok := error(nil), false + for _, s := range setters { + err = s(vAddrI, blankValue, value, t) + if err == nil { + ok = true + break + } + if err != errUnsupportedType { + return err + } + } + if !ok { + // in case all setters returned errUnsupportedType + return err + } + if isNew { // set reference if it was dereferenced and newly allocated + vVal.Set(vAddr) + } + if isMulti { // append if multi-valued + vVar.Set(reflect.Append(vVar, vVal)) + } + return nil +} diff --git a/vendor/github.com/go-git/gcfg/token/position.go b/vendor/github.com/go-git/gcfg/token/position.go new file mode 100644 index 0000000000..fc45c1e769 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/token/position.go @@ -0,0 +1,435 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri) consider making this a separate package outside the go directory. 
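As a quick orientation before the `token` internals below: the scanner vendored above is driven in a `Scan` loop, go/scanner-style. A minimal sketch, assuming `Init(file *token.File, src []byte, err ErrorHandler, mode Mode)` — the field assignments inside `Init` suggest that shape, but the error-handler type is not shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/scanner"
	"github.com/go-git/gcfg/token"
)

func main() {
	src := []byte("[core]\n\teditor = vim ; trailing comment\n")

	fset := token.NewFileSet()
	file := fset.AddFile("example.gcfg", fset.Base(), len(src))

	var s scanner.Scanner
	// nil error handler; ScanComments keeps COMMENT tokens in the stream
	// (without it, Scan skips them via its scanAgain path).
	s.Init(file, src, nil, scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		if tok == token.EOF {
			break
		}
		fmt.Printf("%-16s %-8s %q\n", fset.Position(pos), tok, lit)
	}
}
```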
+ +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. +// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset, first add +// the respective file to the current file set (via FileSet.AddFile) +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid returns true if the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []int + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. 
+func (f *File) LineCount() int { + f.set.mutex.RLock() + n := len(f.lines) + f.set.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + f.set.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.set.mutex.Unlock() +} + +// SetLines sets the line offsets for a file and returns true if successful. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.set.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.set.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. 
+// +func (f *File) Line(p Pos) int { + // TODO(gri) this can be implemented much more efficiently + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// info returns the file name, line, and column number for a file offset. +func (f *File) info(offset int) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.info(offset) + return +} + +// Position returns the Position value for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Position(p Pos) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p) + } + return +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. +// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + s := new(FileSet) + s.base = 1 // 0 == NoPos + return s +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. +// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{s, filename, base, size, []int{0}, nil} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. 
+// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.last = f + return f + } + } + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + s.mutex.RLock() + f = s.file(p) + s.mutex.RUnlock() + } + return +} + +// Position converts a Pos in the fileset into a general Position. +func (s *FileSet) Position(p Pos) (pos Position) { + if p != NoPos { + s.mutex.RLock() + if f := s.file(p); f != nil { + pos = f.position(p) + } + s.mutex.RUnlock() + } + return +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/github.com/go-git/gcfg/token/serialize.go b/vendor/github.com/go-git/gcfg/token/serialize.go new file mode 100644 index 0000000000..4adc8f9e33 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/token/serialize.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. 
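The offset arithmetic defined above composes as follows; a short, self-contained sketch using only the API vendored in this file:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	src := "[core]\nbare = true\n"

	fset := token.NewFileSet()
	// AddFile reserves the Pos range [base, base+size] for this file.
	f := fset.AddFile("config", fset.Base(), len(src))

	// Record line starts so offsets can be mapped to line:column.
	f.SetLinesForContent([]byte(src))

	p := f.Pos(7) // offset of 'b' in "bare", first byte of line 2
	fmt.Println(fset.Position(p)) // "config:2:1"
}
```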
+func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/github.com/go-git/gcfg/token/token.go b/vendor/github.com/go-git/gcfg/token/token.go new file mode 100644 index 0000000000..b3c7c83fa9 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/token/token.go @@ -0,0 +1,83 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token defines constants representing the lexical tokens of the gcfg +// configuration syntax and basic operations on tokens (printing, predicates). +// +// Note that the API for the token package may change to accommodate new +// features or implementation changes in gcfg. +// +package token + +import "strconv" + +// Token is the set of lexical tokens of the gcfg configuration syntax. +type Token int + +// The list of tokens. +const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // section-name, variable-name + STRING // "subsection-name", variable value + literal_end + + operator_beg + // Operators and delimiters + ASSIGN // = + LBRACK // [ + RBRACK // ] + EOL // \n + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + STRING: "STRING", + + ASSIGN: "=", + LBRACK: "[", + RBRACK: "]", + EOL: "\n", +} + +// String returns the string corresponding to the token tok. +// For operators and delimiters, the string is the actual token character +// sequence (e.g., for the token ASSIGN, the string is "="). For all other +// tokens the string corresponds to the token constant name (e.g. for the +// token IDENT, the string is "IDENT"). +// +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +// +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +// +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/vendor/github.com/go-git/gcfg/types/bool.go b/vendor/github.com/go-git/gcfg/types/bool.go new file mode 100644 index 0000000000..8dcae0d8cf --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/bool.go @@ -0,0 +1,23 @@ +package types + +// BoolValues defines the name and value mappings for ParseBool. +var BoolValues = map[string]interface{}{ + "true": true, "yes": true, "on": true, "1": true, + "false": false, "no": false, "off": false, "0": false, +} + +var boolParser = func() *EnumParser { + ep := &EnumParser{} + ep.AddVals(BoolValues) + return ep +}() + +// ParseBool parses bool values according to the definitions in BoolValues. +// Parsing is case-insensitive. 
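Given the `BoolValues` table above, parsing is a map lookup after lowercasing. A sketch of the expected behavior (exact error text comes from `EnumParser.Parse`, defined later in this diff):

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	for _, s := range []string{"YES", "off", "1", "maybe"} {
		v, err := types.ParseBool(s)
		if err != nil {
			fmt.Println(s, "->", err) // "maybe" is not in BoolValues
			continue
		}
		fmt.Println(s, "->", v) // YES -> true, off -> false, 1 -> true
	}
}
```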
+func ParseBool(s string) (bool, error) { + v, err := boolParser.Parse(s) + if err != nil { + return false, err + } + return v.(bool), nil +} diff --git a/vendor/github.com/go-git/gcfg/types/doc.go b/vendor/github.com/go-git/gcfg/types/doc.go new file mode 100644 index 0000000000..9f9c345f6e --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/doc.go @@ -0,0 +1,4 @@ +// Package types defines helpers for type conversions. +// +// The API for this package is not finalized yet. +package types diff --git a/vendor/github.com/go-git/gcfg/types/enum.go b/vendor/github.com/go-git/gcfg/types/enum.go new file mode 100644 index 0000000000..1a0c7ef453 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/enum.go @@ -0,0 +1,44 @@ +package types + +import ( + "fmt" + "reflect" + "strings" +) + +// EnumParser parses "enum" values; i.e. a predefined set of strings to +// predefined values. +type EnumParser struct { + Type string // type name; if not set, use type of first value added + CaseMatch bool // if true, matching of strings is case-sensitive + // PrefixMatch bool + vals map[string]interface{} +} + +// AddVals adds strings and values to an EnumParser. +func (ep *EnumParser) AddVals(vals map[string]interface{}) { + if ep.vals == nil { + ep.vals = make(map[string]interface{}) + } + for k, v := range vals { + if ep.Type == "" { + ep.Type = reflect.TypeOf(v).Name() + } + if !ep.CaseMatch { + k = strings.ToLower(k) + } + ep.vals[k] = v + } +} + +// Parse parses the string and returns the value or an error. +func (ep EnumParser) Parse(s string) (interface{}, error) { + if !ep.CaseMatch { + s = strings.ToLower(s) + } + v, ok := ep.vals[s] + if !ok { + return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s) + } + return v, nil +} diff --git a/vendor/github.com/go-git/gcfg/types/int.go b/vendor/github.com/go-git/gcfg/types/int.go new file mode 100644 index 0000000000..af7e75c125 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/int.go @@ -0,0 +1,86 @@ +package types + +import ( + "fmt" + "strings" +) + +// An IntMode is a mode for parsing integer values, representing a set of +// accepted bases. +type IntMode uint8 + +// IntMode values for ParseInt; can be combined using binary or. +const ( + Dec IntMode = 1 << iota + Hex + Oct +) + +// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`. +func (m IntMode) String() string { + var modes []string + if m&Dec != 0 { + modes = append(modes, "Dec") + } + if m&Hex != 0 { + modes = append(modes, "Hex") + } + if m&Oct != 0 { + modes = append(modes, "Oct") + } + return "IntMode(" + strings.Join(modes, "|") + ")" +} + +var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix") + +func prefix0(val string) bool { + return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0") +} + +func prefix0x(val string) bool { + return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x") +} + +// ParseInt parses val using mode into intptr, which must be a pointer to an +// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases +// when mode permits ambiguity of base; otherwise the prefix can be omitted. 
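The disambiguation rules described in the `ParseInt` doc comment above play out like this in practice; a sketch based only on the mode table in the function body:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	var v int

	_ = types.ParseInt(&v, "0x10", types.Dec|types.Hex)
	fmt.Println(v) // 16: the "0x" prefix selects hex

	_ = types.ParseInt(&v, "10", types.Dec|types.Hex)
	fmt.Println(v) // 10: no prefix, parsed as decimal

	err := types.ParseInt(&v, "10", types.Hex|types.Oct)
	fmt.Println(err) // ambiguous: neither "0" nor "0x" prefix present
}
```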
+func ParseInt(intptr interface{}, val string, mode IntMode) error { + val = strings.TrimSpace(val) + verb := byte(0) + switch mode { + case Dec: + verb = 'd' + case Dec + Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Oct: + if prefix0(val) && !prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Hex + Oct: + verb = 'v' + case Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'x' + } + case Oct: + verb = 'o' + case Hex + Oct: + if prefix0(val) { + verb = 'v' + } else { + return errIntAmbig + } + } + if verb == 0 { + panic("unsupported mode") + } + return ScanFully(intptr, val, verb) +} diff --git a/vendor/github.com/go-git/gcfg/types/scan.go b/vendor/github.com/go-git/gcfg/types/scan.go new file mode 100644 index 0000000000..db2f6ed3ca --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/scan.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "io" + "reflect" +) + +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. +func ScanFully(ptr interface{}, val string, verb byte) error { + t := reflect.ValueOf(ptr).Elem().Type() + // attempt to read extra bytes to make sure the value is consumed + var b []byte + n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) + switch { + case n < 1 || n == 1 && err != io.EOF: + return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) + case n > 1: + return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) + } + // n == 1 && err == io.EOF + return nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/.gitignore b/vendor/github.com/go-git/go-billy/v5/.gitignore new file mode 100644 index 0000000000..7aeb46699c --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/.gitignore @@ -0,0 +1,4 @@ +/coverage.txt +/vendor +Gopkg.lock +Gopkg.toml diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/go-git/go-billy/v5/LICENSE similarity index 84% rename from vendor/github.com/pelletier/go-toml/LICENSE rename to vendor/github.com/go-git/go-billy/v5/LICENSE index f414553c21..9d60756894 100644 --- a/vendor/github.com/pelletier/go-toml/LICENSE +++ b/vendor/github.com/go-git/go-billy/v5/LICENSE @@ -1,49 +1,3 @@ -The bulk of github.com/pelletier/go-toml is distributed under the MIT license -(see below), with the exception of localtime.go and localtime.test.go. -Those two files have been copied over from Google's civil library at revision -ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache -2.0 license (see below). - - -github.com/pelletier/go-toml: - - -The MIT License (MIT) - -Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -localtime.go, localtime_test.go: - -Originals: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go -Changes: - * Renamed files from civil* to localtime*. - * Package changed from civil to toml. - * 'Local' prefix added to all structs. -License: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE - - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -224,7 +178,7 @@ License: APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -232,7 +186,7 @@ License: same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2017 Sourced Technologies S.L. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/go-git/go-billy/v5/README.md b/vendor/github.com/go-git/go-billy/v5/README.md new file mode 100644 index 0000000000..da5c074782 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/README.md @@ -0,0 +1,73 @@ +# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy/v5) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest) + +The missing interface filesystem abstraction for Go. +Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations. + +Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project. + +## Installation + +```go +import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH) +import "github.com/go-git/go-billy" // with go modules disabled +``` + +## Usage + +Billy exposes filesystems using the +[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem). +Each filesystem implementation gives you a `New` method, whose arguments depend on +the implementation itself, that returns a new `Filesystem`. + +The following example caches in memory all readable files in a directory from any +billy's filesystem implementation. 
+ +```go +func LoadToMemory(origin billy.Filesystem, path string) (*memory.Memory, error) { + memory := memory.New() + + files, err := origin.ReadDir("/") + if err != nil { + return nil, err + } + + for _, file := range files { + if file.IsDir() { + continue + } + + src, err := origin.Open(file.Name()) + if err != nil { + return nil, err + } + + dst, err := memory.Create(file.Name()) + if err != nil { + return nil, err + } + + if _, err = io.Copy(dst, src); err != nil { + return nil, err + } + + if err := dst.Close(); err != nil { + return nil, err + } + + if err := src.Close(); err != nil { + return nil, err + } + } + + return memory, nil +} +``` + +## Why billy? + +The library billy deals with storage systems and Billy is the name of a well-known, IKEA +bookcase. That's it. + +## License + +Apache License Version 2.0, see [LICENSE](LICENSE) diff --git a/vendor/github.com/go-git/go-billy/v5/fs.go b/vendor/github.com/go-git/go-billy/v5/fs.go new file mode 100644 index 0000000000..a9efccdeb2 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/fs.go @@ -0,0 +1,202 @@ +package billy + +import ( + "errors" + "io" + "os" + "time" +) + +var ( + ErrReadOnly = errors.New("read-only filesystem") + ErrNotSupported = errors.New("feature not supported") + ErrCrossedBoundary = errors.New("chroot boundary crossed") +) + +// Capability holds the supported features of a billy filesystem. This does +// not mean that the capability has to be supported by the underlying storage. +// For example, a billy filesystem may support WriteCapability but the +// storage be mounted in read only mode. +type Capability uint64 + +const ( + // WriteCapability means that the fs is writable. + WriteCapability Capability = 1 << iota + // ReadCapability means that the fs is readable. + ReadCapability + // ReadAndWriteCapability is the ability to open a file in read and write mode. + ReadAndWriteCapability + // SeekCapability means it is able to move position inside the file. + SeekCapability + // TruncateCapability means that a file can be truncated. + TruncateCapability + // LockCapability is the ability to lock a file. + LockCapability + + // DefaultCapabilities lists all capable features supported by filesystems + // without Capability interface. This list should not be changed until a + // major version is released. + DefaultCapabilities Capability = WriteCapability | ReadCapability | + ReadAndWriteCapability | SeekCapability | TruncateCapability | + LockCapability + + // AllCapabilities lists all capable features. + AllCapabilities Capability = WriteCapability | ReadCapability | + ReadAndWriteCapability | SeekCapability | TruncateCapability | + LockCapability +) + +// Filesystem abstract the operations in a storage-agnostic interface. +// Each method implementation mimics the behavior of the equivalent functions +// at the os package from the standard library. +type Filesystem interface { + Basic + TempFile + Dir + Symlink + Chroot +} + +// Basic abstract the basic operations in a storage-agnostic interface as +// an extension to the Basic interface. +type Basic interface { + // Create creates the named file with mode 0666 (before umask), truncating + // it if it already exists. If successful, methods on the returned File can + // be used for I/O; the associated file descriptor has mode O_RDWR. + Create(filename string) (File, error) + // Open opens the named file for reading. If successful, methods on the + // returned file can be used for reading; the associated file descriptor has + // mode O_RDONLY. 
+ Open(filename string) (File, error) + // OpenFile is the generalized open call; most users will use Open or Create + // instead. It opens the named file with specified flag (O_RDONLY etc.) and + // perm, (0666 etc.) if applicable. If successful, methods on the returned + // File can be used for I/O. + OpenFile(filename string, flag int, perm os.FileMode) (File, error) + // Stat returns a FileInfo describing the named file. + Stat(filename string) (os.FileInfo, error) + // Rename renames (moves) oldpath to newpath. If newpath already exists and + // is not a directory, Rename replaces it. OS-specific restrictions may + // apply when oldpath and newpath are in different directories. + Rename(oldpath, newpath string) error + // Remove removes the named file or directory. + Remove(filename string) error + // Join joins any number of path elements into a single path, adding a + // Separator if necessary. Join calls filepath.Clean on the result; in + // particular, all empty strings are ignored. On Windows, the result is a + // UNC path if and only if the first path element is a UNC path. + Join(elem ...string) string +} + +type TempFile interface { + // TempFile creates a new temporary file in the directory dir with a name + // beginning with prefix, opens the file for reading and writing, and + // returns the resulting *os.File. If dir is the empty string, TempFile + // uses the default directory for temporary files (see os.TempDir). + // Multiple programs calling TempFile simultaneously will not choose the + // same file. The caller can use f.Name() to find the pathname of the file. + // It is the caller's responsibility to remove the file when no longer + // needed. + TempFile(dir, prefix string) (File, error) +} + +// Dir abstract the dir related operations in a storage-agnostic interface as +// an extension to the Basic interface. +type Dir interface { + // ReadDir reads the directory named by dirname and returns a list of + // directory entries sorted by filename. + ReadDir(path string) ([]os.FileInfo, error) + // MkdirAll creates a directory named path, along with any necessary + // parents, and returns nil, or else returns an error. The permission bits + // perm are used for all directories that MkdirAll creates. If path is/ + // already a directory, MkdirAll does nothing and returns nil. + MkdirAll(filename string, perm os.FileMode) error +} + +// Symlink abstract the symlink related operations in a storage-agnostic +// interface as an extension to the Basic interface. +type Symlink interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. + Lstat(filename string) (os.FileInfo, error) + // Symlink creates a symbolic-link from link to target. target may be an + // absolute or relative path, and need not refer to an existing node. + // Parent directories of link are created as necessary. + Symlink(target, link string) error + // Readlink returns the target path of link. + Readlink(link string) (string, error) +} + +// Change abstract the FileInfo change related operations in a storage-agnostic +// interface as an extension to the Basic interface +type Change interface { + // Chmod changes the mode of the named file to mode. If the file is a + // symbolic link, it changes the mode of the link's target. + Chmod(name string, mode os.FileMode) error + // Lchown changes the numeric uid and gid of the named file. 
If the file is + // a symbolic link, it changes the uid and gid of the link itself. + Lchown(name string, uid, gid int) error + // Chown changes the numeric uid and gid of the named file. If the file is a + // symbolic link, it changes the uid and gid of the link's target. + Chown(name string, uid, gid int) error + // Chtimes changes the access and modification times of the named file, + // similar to the Unix utime() or utimes() functions. + // + // The underlying filesystem may truncate or round the values to a less + // precise time unit. + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +// Chroot abstract the chroot related operations in a storage-agnostic interface +// as an extension to the Basic interface. +type Chroot interface { + // Chroot returns a new filesystem from the same type where the new root is + // the given path. Files outside of the designated directory tree cannot be + // accessed. + Chroot(path string) (Filesystem, error) + // Root returns the root path of the filesystem. + Root() string +} + +// File represent a file, being a subset of the os.File +type File interface { + // Name returns the name of the file as presented to Open. + Name() string + io.Writer + io.Reader + io.ReaderAt + io.Seeker + io.Closer + // Lock locks the file like e.g. flock. It protects against access from + // other processes. + Lock() error + // Unlock unlocks the file. + Unlock() error + // Truncate the file. + Truncate(size int64) error +} + +// Capable interface can return the available features of a filesystem. +type Capable interface { + // Capabilities returns the capabilities of a filesystem in bit flags. + Capabilities() Capability +} + +// Capabilities returns the features supported by a filesystem. If the FS +// does not implement Capable interface it returns all features. +func Capabilities(fs Basic) Capability { + capable, ok := fs.(Capable) + if !ok { + return DefaultCapabilities + } + + return capable.Capabilities() +} + +// CapabilityCheck tests the filesystem for the provided capabilities and +// returns true in case it supports all of them. +func CapabilityCheck(fs Basic, capabilities Capability) bool { + fsCaps := Capabilities(fs) + return fsCaps&capabilities == capabilities +} diff --git a/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go new file mode 100644 index 0000000000..8b44e784bd --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go @@ -0,0 +1,242 @@ +package chroot + +import ( + "os" + "path/filepath" + "strings" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/polyfill" +) + +// ChrootHelper is a helper to implement billy.Chroot. +type ChrootHelper struct { + underlying billy.Filesystem + base string +} + +// New creates a new filesystem wrapping up the given 'fs'. +// The created filesystem has its base in the given ChrootHelperectory of the +// underlying filesystem. 
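A sketch of how the capability flags defined in fs.go above are meant to be queried. It assumes the memfs implementation vendored further down in this diff, which advertises everything except `LockCapability`:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()

	caps := billy.Capabilities(fs)
	fmt.Println(caps&billy.WriteCapability != 0)                 // true
	fmt.Println(billy.CapabilityCheck(fs, billy.LockCapability)) // false: memfs locks are no-ops
}
```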
+func New(fs billy.Basic, base string) billy.Filesystem { + return &ChrootHelper{ + underlying: polyfill.New(fs), + base: base, + } +} + +func (fs *ChrootHelper) underlyingPath(filename string) (string, error) { + if isCrossBoundaries(filename) { + return "", billy.ErrCrossedBoundary + } + + return fs.Join(fs.Root(), filename), nil +} + +func isCrossBoundaries(path string) bool { + path = filepath.ToSlash(path) + path = filepath.Clean(path) + + return strings.HasPrefix(path, ".."+string(filepath.Separator)) +} + +func (fs *ChrootHelper) Create(filename string) (billy.File, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + f, err := fs.underlying.Create(fullpath) + if err != nil { + return nil, err + } + + return newFile(fs, f, filename), nil +} + +func (fs *ChrootHelper) Open(filename string) (billy.File, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + f, err := fs.underlying.Open(fullpath) + if err != nil { + return nil, err + } + + return newFile(fs, f, filename), nil +} + +func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + f, err := fs.underlying.OpenFile(fullpath, flag, mode) + if err != nil { + return nil, err + } + + return newFile(fs, f, filename), nil +} + +func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + return fs.underlying.Stat(fullpath) +} + +func (fs *ChrootHelper) Rename(from, to string) error { + var err error + from, err = fs.underlyingPath(from) + if err != nil { + return err + } + + to, err = fs.underlyingPath(to) + if err != nil { + return err + } + + return fs.underlying.Rename(from, to) +} + +func (fs *ChrootHelper) Remove(path string) error { + fullpath, err := fs.underlyingPath(path) + if err != nil { + return err + } + + return fs.underlying.Remove(fullpath) +} + +func (fs *ChrootHelper) Join(elem ...string) string { + return fs.underlying.Join(elem...) 
+} + +func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) { + fullpath, err := fs.underlyingPath(dir) + if err != nil { + return nil, err + } + + f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix) + if err != nil { + return nil, err + } + + return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil +} + +func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) { + fullpath, err := fs.underlyingPath(path) + if err != nil { + return nil, err + } + + return fs.underlying.(billy.Dir).ReadDir(fullpath) +} + +func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return err + } + + return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm) +} + +func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + return fs.underlying.(billy.Symlink).Lstat(fullpath) +} + +func (fs *ChrootHelper) Symlink(target, link string) error { + target = filepath.FromSlash(target) + + // only rewrite target if it's already absolute + if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) { + target = fs.Join(fs.Root(), target) + target = filepath.Clean(filepath.FromSlash(target)) + } + + link, err := fs.underlyingPath(link) + if err != nil { + return err + } + + return fs.underlying.(billy.Symlink).Symlink(target, link) +} + +func (fs *ChrootHelper) Readlink(link string) (string, error) { + fullpath, err := fs.underlyingPath(link) + if err != nil { + return "", err + } + + target, err := fs.underlying.(billy.Symlink).Readlink(fullpath) + if err != nil { + return "", err + } + + if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) { + return target, nil + } + + target, err = filepath.Rel(fs.base, target) + if err != nil { + return "", err + } + + return string(os.PathSeparator) + target, nil +} + +func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) { + fullpath, err := fs.underlyingPath(path) + if err != nil { + return nil, err + } + + return New(fs.underlying, fullpath), nil +} + +func (fs *ChrootHelper) Root() string { + return fs.base +} + +func (fs *ChrootHelper) Underlying() billy.Basic { + return fs.underlying +} + +// Capabilities implements the Capable interface. +func (fs *ChrootHelper) Capabilities() billy.Capability { + return billy.Capabilities(fs.underlying) +} + +type file struct { + billy.File + name string +} + +func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File { + filename = fs.Join(fs.Root(), filename) + filename, _ = filepath.Rel(fs.Root(), filename) + + return &file{ + File: f, + name: filename, + } +} + +func (f *file) Name() string { + return f.name +} diff --git a/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go b/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go new file mode 100644 index 0000000000..1efce0e7b8 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go @@ -0,0 +1,105 @@ +package polyfill + +import ( + "os" + "path/filepath" + + "github.com/go-git/go-billy/v5" +) + +// Polyfill is a helper that implements all missing method from billy.Filesystem. 
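Putting the chroot helper above to work; `util.WriteFile` is go-billy's own helper (memfs uses it later in this diff), everything else comes from the code above:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = util.WriteFile(fs, "/etc/config", []byte("data"), 0644)

	etc, _ := fs.Chroot("/etc")
	f, _ := etc.Open("config") // resolves to /etc/config underneath
	fmt.Println(f.Name())      // "config", relative to the new root

	_, err := etc.Open("../secret")
	fmt.Println(err) // chroot boundary crossed
}
```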
+type Polyfill struct { + billy.Basic + c capabilities +} + +type capabilities struct{ tempfile, dir, symlink, chroot bool } + +// New creates a new filesystem wrapping up 'fs' the intercepts all the calls +// made and errors if fs doesn't implement any of the billy interfaces. +func New(fs billy.Basic) billy.Filesystem { + if original, ok := fs.(billy.Filesystem); ok { + return original + } + + h := &Polyfill{Basic: fs} + + _, h.c.tempfile = h.Basic.(billy.TempFile) + _, h.c.dir = h.Basic.(billy.Dir) + _, h.c.symlink = h.Basic.(billy.Symlink) + _, h.c.chroot = h.Basic.(billy.Chroot) + return h +} + +func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) { + if !h.c.tempfile { + return nil, billy.ErrNotSupported + } + + return h.Basic.(billy.TempFile).TempFile(dir, prefix) +} + +func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) { + if !h.c.dir { + return nil, billy.ErrNotSupported + } + + return h.Basic.(billy.Dir).ReadDir(path) +} + +func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error { + if !h.c.dir { + return billy.ErrNotSupported + } + + return h.Basic.(billy.Dir).MkdirAll(filename, perm) +} + +func (h *Polyfill) Symlink(target, link string) error { + if !h.c.symlink { + return billy.ErrNotSupported + } + + return h.Basic.(billy.Symlink).Symlink(target, link) +} + +func (h *Polyfill) Readlink(link string) (string, error) { + if !h.c.symlink { + return "", billy.ErrNotSupported + } + + return h.Basic.(billy.Symlink).Readlink(link) +} + +func (h *Polyfill) Lstat(path string) (os.FileInfo, error) { + if !h.c.symlink { + return nil, billy.ErrNotSupported + } + + return h.Basic.(billy.Symlink).Lstat(path) +} + +func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) { + if !h.c.chroot { + return nil, billy.ErrNotSupported + } + + return h.Basic.(billy.Chroot).Chroot(path) +} + +func (h *Polyfill) Root() string { + if !h.c.chroot { + return string(filepath.Separator) + } + + return h.Basic.(billy.Chroot).Root() +} + +func (h *Polyfill) Underlying() billy.Basic { + return h.Basic +} + +// Capabilities implements the Capable interface. +func (h *Polyfill) Capabilities() billy.Capability { + return billy.Capabilities(h.Basic) +} diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go new file mode 100644 index 0000000000..f217693e6f --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go @@ -0,0 +1,410 @@ +// Package memfs provides a billy filesystem base on memory. +package memfs // import "github.com/go-git/go-billy/v5/memfs" + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" + "github.com/go-git/go-billy/v5/util" +) + +const separator = filepath.Separator + +// Memory a very convenient filesystem based on memory files +type Memory struct { + s *storage + + tempCount int +} + +//New returns a new Memory filesystem. 
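The polyfill above only forwards an extension call when the wrapped value actually implements the extension interface. Hiding methods behind an embedded `billy.Basic` exercises both paths — a minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/memfs"
)

// basicOnly exposes only the Basic method set of an existing filesystem,
// so the polyfill's capability probes all fail for it.
type basicOnly struct{ billy.Basic }

func main() {
	fs := polyfill.New(basicOnly{memfs.New()})

	// Basic calls pass straight through to the wrapped filesystem.
	f, _ := fs.Create("/hello.txt")
	fmt.Println(f.Name())

	// Extension interfaces the wrapped value lacks fail gracefully.
	_, err := fs.TempFile("", "tmp")
	fmt.Println(err == billy.ErrNotSupported) // true
}
```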
+func New() billy.Filesystem { + fs := &Memory{s: newStorage()} + return chroot.New(fs, string(separator)) +} + +func (fs *Memory) Create(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +func (fs *Memory) Open(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDONLY, 0) +} + +func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + f, has := fs.s.Get(filename) + if !has { + if !isCreate(flag) { + return nil, os.ErrNotExist + } + + var err error + f, err = fs.s.New(filename, perm, flag) + if err != nil { + return nil, err + } + } else { + if isExclusive(flag) { + return nil, os.ErrExist + } + + if target, isLink := fs.resolveLink(filename, f); isLink { + return fs.OpenFile(target, flag, perm) + } + } + + if f.mode.IsDir() { + return nil, fmt.Errorf("cannot open directory: %s", filename) + } + + return f.Duplicate(filename, perm, flag), nil +} + +var errNotLink = errors.New("not a link") + +func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) { + if !isSymlink(f.mode) { + return fullpath, false + } + + target = string(f.content.bytes) + if !isAbs(target) { + target = fs.Join(filepath.Dir(fullpath), target) + } + + return target, true +} + +// On Windows OS, IsAbs validates if a path is valid based on if stars with a +// unit (eg.: `C:\`) to assert that is absolute, but in this mem implementation +// any path starting by `separator` is also considered absolute. +func isAbs(path string) bool { + return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator)) +} + +func (fs *Memory) Stat(filename string) (os.FileInfo, error) { + f, has := fs.s.Get(filename) + if !has { + return nil, os.ErrNotExist + } + + fi, _ := f.Stat() + + var err error + if target, isLink := fs.resolveLink(filename, f); isLink { + fi, err = fs.Stat(target) + if err != nil { + return nil, err + } + } + + // the name of the file should always the name of the stated file, so we + // overwrite the Stat returned from the storage with it, since the + // filename may belong to a link. 
+ fi.(*fileInfo).name = filepath.Base(filename) + return fi, nil +} + +func (fs *Memory) Lstat(filename string) (os.FileInfo, error) { + f, has := fs.s.Get(filename) + if !has { + return nil, os.ErrNotExist + } + + return f.Stat() +} + +type ByName []os.FileInfo + +func (a ByName) Len() int { return len(a) } +func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } +func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) { + if f, has := fs.s.Get(path); has { + if target, isLink := fs.resolveLink(path, f); isLink { + return fs.ReadDir(target) + } + } + + var entries []os.FileInfo + for _, f := range fs.s.Children(path) { + fi, _ := f.Stat() + entries = append(entries, fi) + } + + sort.Sort(ByName(entries)) + + return entries, nil +} + +func (fs *Memory) MkdirAll(path string, perm os.FileMode) error { + _, err := fs.s.New(path, perm|os.ModeDir, 0) + return err +} + +func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) { + return util.TempFile(fs, dir, prefix) +} + +func (fs *Memory) getTempFilename(dir, prefix string) string { + fs.tempCount++ + filename := fmt.Sprintf("%s_%d_%d", prefix, fs.tempCount, time.Now().UnixNano()) + return fs.Join(dir, filename) +} + +func (fs *Memory) Rename(from, to string) error { + return fs.s.Rename(from, to) +} + +func (fs *Memory) Remove(filename string) error { + return fs.s.Remove(filename) +} + +func (fs *Memory) Join(elem ...string) string { + return filepath.Join(elem...) +} + +func (fs *Memory) Symlink(target, link string) error { + _, err := fs.Stat(link) + if err == nil { + return os.ErrExist + } + + if !os.IsNotExist(err) { + return err + } + + return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink) +} + +func (fs *Memory) Readlink(link string) (string, error) { + f, has := fs.s.Get(link) + if !has { + return "", os.ErrNotExist + } + + if !isSymlink(f.mode) { + return "", &os.PathError{ + Op: "readlink", + Path: link, + Err: fmt.Errorf("not a symlink"), + } + } + + return string(f.content.bytes), nil +} + +// Capabilities implements the Capable interface. 
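The link-resolution logic above means `Stat` follows a symlink to its target while `Readlink` reports the stored target path; a sketch using only the memfs API shown here:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	_ = util.WriteFile(fs, "/data/real.txt", []byte("hi"), 0644)
	_ = fs.Symlink("/data/real.txt", "/link.txt")

	// Stat resolves the link but reports the stated name, per the
	// fileInfo name overwrite above.
	fi, _ := fs.Stat("/link.txt")
	fmt.Println(fi.Name(), fi.Size()) // link.txt 2

	target, _ := fs.Readlink("/link.txt")
	fmt.Println(target) // /data/real.txt
}
```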
+func (fs *Memory) Capabilities() billy.Capability { + return billy.WriteCapability | + billy.ReadCapability | + billy.ReadAndWriteCapability | + billy.SeekCapability | + billy.TruncateCapability +} + +type file struct { + name string + content *content + position int64 + flag int + mode os.FileMode + + isClosed bool +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) Read(b []byte) (int, error) { + n, err := f.ReadAt(b, f.position) + f.position += int64(n) + + if err == io.EOF && n != 0 { + err = nil + } + + return n, err +} + +func (f *file) ReadAt(b []byte, off int64) (int, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) { + return 0, errors.New("read not supported") + } + + n, err := f.content.ReadAt(b, off) + + return n, err +} + +func (f *file) Seek(offset int64, whence int) (int64, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + switch whence { + case io.SeekCurrent: + f.position += offset + case io.SeekStart: + f.position = offset + case io.SeekEnd: + f.position = int64(f.content.Len()) + offset + } + + return f.position, nil +} + +func (f *file) Write(p []byte) (int, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) { + return 0, errors.New("write not supported") + } + + n, err := f.content.WriteAt(p, f.position) + f.position += int64(n) + + return n, err +} + +func (f *file) Close() error { + if f.isClosed { + return os.ErrClosed + } + + f.isClosed = true + return nil +} + +func (f *file) Truncate(size int64) error { + if size < int64(len(f.content.bytes)) { + f.content.bytes = f.content.bytes[:size] + } else if more := int(size) - len(f.content.bytes); more > 0 { + f.content.bytes = append(f.content.bytes, make([]byte, more)...) + } + + return nil +} + +func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File { + new := &file{ + name: filename, + content: f.content, + mode: mode, + flag: flag, + } + + if isAppend(flag) { + new.position = int64(new.content.Len()) + } + + if isTruncate(flag) { + new.content.Truncate() + } + + return new +} + +func (f *file) Stat() (os.FileInfo, error) { + return &fileInfo{ + name: f.Name(), + mode: f.mode, + size: f.content.Len(), + }, nil +} + +// Lock is a no-op in memfs. +func (f *file) Lock() error { + return nil +} + +// Unlock is a no-op in memfs. 
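`Duplicate` above shares one `content` between handles, which is what gives `O_APPEND` and `O_TRUNC` their usual semantics here. A sketch — `util.ReadFile` is assumed to exist in go-billy's util package, mirroring `util.WriteFile`:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()

	f, _ := fs.Create("/log.txt") // O_RDWR|O_CREATE|O_TRUNC
	f.Write([]byte("one"))
	f.Close()

	// isAppend(flag) makes Duplicate start the new handle's position
	// at the end of the shared content.
	f2, _ := fs.OpenFile("/log.txt", os.O_WRONLY|os.O_APPEND, 0644)
	f2.Write([]byte("two"))
	f2.Close()

	b, _ := util.ReadFile(fs, "/log.txt")
	fmt.Println(string(b)) // "onetwo"
}
```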
+func (f *file) Unlock() error { + return nil +} + +type fileInfo struct { + name string + size int + mode os.FileMode +} + +func (fi *fileInfo) Name() string { + return fi.name +} + +func (fi *fileInfo) Size() int64 { + return int64(fi.size) +} + +func (fi *fileInfo) Mode() os.FileMode { + return fi.mode +} + +func (*fileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi *fileInfo) IsDir() bool { + return fi.mode.IsDir() +} + +func (*fileInfo) Sys() interface{} { + return nil +} + +func (c *content) Truncate() { + c.bytes = make([]byte, 0) +} + +func (c *content) Len() int { + return len(c.bytes) +} + +func isCreate(flag int) bool { + return flag&os.O_CREATE != 0 +} + +func isExclusive(flag int) bool { + return flag&os.O_EXCL != 0 +} + +func isAppend(flag int) bool { + return flag&os.O_APPEND != 0 +} + +func isTruncate(flag int) bool { + return flag&os.O_TRUNC != 0 +} + +func isReadAndWrite(flag int) bool { + return flag&os.O_RDWR != 0 +} + +func isReadOnly(flag int) bool { + return flag == os.O_RDONLY +} + +func isWriteOnly(flag int) bool { + return flag&os.O_WRONLY != 0 +} + +func isSymlink(m os.FileMode) bool { + return m&os.ModeSymlink != 0 +} diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go new file mode 100644 index 0000000000..d3ff5a25db --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go @@ -0,0 +1,229 @@ +package memfs + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" +) + +type storage struct { + files map[string]*file + children map[string]map[string]*file +} + +func newStorage() *storage { + return &storage{ + files: make(map[string]*file, 0), + children: make(map[string]map[string]*file, 0), + } +} + +func (s *storage) Has(path string) bool { + path = clean(path) + + _, ok := s.files[path] + return ok +} + +func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) { + path = clean(path) + if s.Has(path) { + if !s.MustGet(path).mode.IsDir() { + return nil, fmt.Errorf("file already exists %q", path) + } + + return nil, nil + } + + name := filepath.Base(path) + + f := &file{ + name: name, + content: &content{name: name}, + mode: mode, + flag: flag, + } + + s.files[path] = f + s.createParent(path, mode, f) + return f, nil +} + +func (s *storage) createParent(path string, mode os.FileMode, f *file) error { + base := filepath.Dir(path) + base = clean(base) + if f.Name() == string(separator) { + return nil + } + + if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil { + return err + } + + if _, ok := s.children[base]; !ok { + s.children[base] = make(map[string]*file, 0) + } + + s.children[base][f.Name()] = f + return nil +} + +func (s *storage) Children(path string) []*file { + path = clean(path) + + l := make([]*file, 0) + for _, f := range s.children[path] { + l = append(l, f) + } + + return l +} + +func (s *storage) MustGet(path string) *file { + f, ok := s.Get(path) + if !ok { + panic(fmt.Errorf("couldn't find %q", path)) + } + + return f +} + +func (s *storage) Get(path string) (*file, bool) { + path = clean(path) + if !s.Has(path) { + return nil, false + } + + file, ok := s.files[path] + return file, ok +} + +func (s *storage) Rename(from, to string) error { + from = clean(from) + to = clean(to) + + if !s.Has(from) { + return os.ErrNotExist + } + + move := [][2]string{{from, to}} + + for pathFrom := range s.files { + if pathFrom == from || !filepath.HasPrefix(pathFrom, from) { + continue + } + + rel, _ := filepath.Rel(from, 
pathFrom)
+		pathTo := filepath.Join(to, rel)
+
+		move = append(move, [2]string{pathFrom, pathTo})
+	}
+
+	for _, ops := range move {
+		from := ops[0]
+		to := ops[1]
+
+		if err := s.move(from, to); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *storage) move(from, to string) error {
+	s.files[to] = s.files[from]
+	s.files[to].name = filepath.Base(to)
+	s.children[to] = s.children[from]
+
+	defer func() {
+		delete(s.children, from)
+		delete(s.files, from)
+		delete(s.children[filepath.Dir(from)], filepath.Base(from))
+	}()
+
+	return s.createParent(to, 0644, s.files[to])
+}
+
+func (s *storage) Remove(path string) error {
+	path = clean(path)
+
+	f, has := s.Get(path)
+	if !has {
+		return os.ErrNotExist
+	}
+
+	if f.mode.IsDir() && len(s.children[path]) != 0 {
+		return fmt.Errorf("dir: %s contains files", path)
+	}
+
+	base, file := filepath.Split(path)
+	base = filepath.Clean(base)
+
+	delete(s.children[base], file)
+	delete(s.files, path)
+	return nil
+}
+
+func clean(path string) string {
+	return filepath.Clean(filepath.FromSlash(path))
+}
+
+type content struct {
+	name  string
+	bytes []byte
+}
+
+func (c *content) WriteAt(p []byte, off int64) (int, error) {
+	if off < 0 {
+		return 0, &os.PathError{
+			Op:   "writeat",
+			Path: c.name,
+			Err:  errors.New("negative offset"),
+		}
+	}
+
+	prev := len(c.bytes)
+
+	diff := int(off) - prev
+	if diff > 0 {
+		c.bytes = append(c.bytes, make([]byte, diff)...)
+	}
+
+	c.bytes = append(c.bytes[:off], p...)
+	if len(c.bytes) < prev {
+		c.bytes = c.bytes[:prev]
+	}
+
+	return len(p), nil
+}
+
+func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
+	if off < 0 {
+		return 0, &os.PathError{
+			Op:   "readat",
+			Path: c.name,
+			Err:  errors.New("negative offset"),
+		}
+	}
+
+	size := int64(len(c.bytes))
+	if off >= size {
+		return 0, io.EOF
+	}
+
+	l := int64(len(b))
+	if off+l > size {
+		l = size - off
+	}
+
+	btr := c.bytes[off : off+l]
+	if len(btr) < len(b) {
+		err = io.EOF
+	}
+	n = copy(b, btr)
+
+	return
+}
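To make the semantics of this in-memory backend concrete, here is a minimal sketch (not part of the vendored files, and the path and contents are arbitrary) that exercises `memfs` together with the `util` helpers vendored later in this patch:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()

	// WriteFile opens with O_CREATE, so the storage layer creates the
	// missing parent directory for us.
	if err := util.WriteFile(fs, "/dir/hello.txt", []byte("hello\n"), 0644); err != nil {
		panic(err)
	}

	data, err := util.ReadFile(fs, "/dir/hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)

	// ReadDir returns entries sorted by name (see the ByName helper above).
	entries, err := fs.ReadDir("/dir")
	if err != nil {
		panic(err)
	}
	for _, fi := range entries {
		fmt.Println(fi.Name(), fi.Size())
	}
}
```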
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/vendor/github.com/go-git/go-billy/v5/osfs/os.go
new file mode 100644
index 0000000000..9665d2755d
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/osfs/os.go
@@ -0,0 +1,144 @@
+// +build !js

+// Package osfs provides a billy filesystem for the OS.
+package osfs // import "github.com/go-git/go-billy/v5/osfs"
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-billy/v5/helper/chroot"
+)
+
+const (
+	defaultDirectoryMode = 0755
+	defaultCreateMode    = 0666
+)
+
+// Default Filesystem representing the root of the os filesystem.
+var Default = &OS{}
+
+// OS is a filesystem based on the os filesystem.
+type OS struct{}
+
+// New returns a new OS filesystem.
+func New(baseDir string) billy.Filesystem {
+	return chroot.New(Default, baseDir)
+}
+
+func (fs *OS) Create(filename string) (billy.File, error) {
+	return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode)
+}
+
+func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
+	if flag&os.O_CREATE != 0 {
+		if err := fs.createDir(filename); err != nil {
+			return nil, err
+		}
+	}
+
+	f, err := os.OpenFile(filename, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	return &file{File: f}, err
+}
+
+func (fs *OS) createDir(fullpath string) error {
+	dir := filepath.Dir(fullpath)
+	if dir != "." {
+		if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) {
+	l, err := ioutil.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+
+	var s = make([]os.FileInfo, len(l))
+	for i, f := range l {
+		s[i] = f
+	}
+
+	return s, nil
+}
+
+func (fs *OS) Rename(from, to string) error {
+	if err := fs.createDir(to); err != nil {
+		return err
+	}
+
+	return rename(from, to)
+}
+
+func (fs *OS) MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, defaultDirectoryMode)
+}
+
+func (fs *OS) Open(filename string) (billy.File, error) {
+	return fs.OpenFile(filename, os.O_RDONLY, 0)
+}
+
+func (fs *OS) Stat(filename string) (os.FileInfo, error) {
+	return os.Stat(filename)
+}
+
+func (fs *OS) Remove(filename string) error {
+	return os.Remove(filename)
+}
+
+func (fs *OS) TempFile(dir, prefix string) (billy.File, error) {
+	if err := fs.createDir(dir + string(os.PathSeparator)); err != nil {
+		return nil, err
+	}
+
+	f, err := ioutil.TempFile(dir, prefix)
+	if err != nil {
+		return nil, err
+	}
+	return &file{File: f}, nil
+}
+
+func (fs *OS) Join(elem ...string) string {
+	return filepath.Join(elem...)
+}
+
+func (fs *OS) RemoveAll(path string) error {
+	return os.RemoveAll(filepath.Clean(path))
+}
+
+func (fs *OS) Lstat(filename string) (os.FileInfo, error) {
+	return os.Lstat(filepath.Clean(filename))
+}
+
+func (fs *OS) Symlink(target, link string) error {
+	if err := fs.createDir(link); err != nil {
+		return err
+	}
+
+	return os.Symlink(target, link)
+}
+
+func (fs *OS) Readlink(link string) (string, error) {
+	return os.Readlink(link)
+}
+
+// Capabilities implements the Capable interface.
+func (fs *OS) Capabilities() billy.Capability {
+	return billy.DefaultCapabilities
+}
+
+// file is a wrapper for an os.File which adds support for file locking.
+type file struct {
+	*os.File
+	m sync.Mutex
+}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go
new file mode 100644
index 0000000000..8ae68fed68
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go
@@ -0,0 +1,21 @@
+// +build js

+package osfs
+
+import (
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-billy/v5/helper/chroot"
+	"github.com/go-git/go-billy/v5/memfs"
+)
+
+// globalMemFs is the global memory fs
+var globalMemFs = memfs.New()
+
+// Default Filesystem representing the root of in-memory filesystem for a
+// js/wasm environment.
+var Default = memfs.New()
+
+// New returns a new OS filesystem.
+func New(baseDir string) billy.Filesystem {
+	return chroot.New(Default, Default.Join("/", baseDir))
+}
diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go
new file mode 100644
index 0000000000..e8f519ffe2
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go
@@ -0,0 +1,85 @@
+// +build plan9

+package osfs
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+func (f *file) Lock() error {
+	// Plan 9 uses a mode bit instead of explicit lock/unlock syscalls.
+	//
+	// Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open
+	// for I/O by only one fid at a time across all clients of the server. If a
+	// second open is attempted, it draws an error.”
+	//
+	// There is no obvious way to implement this function using the exclusive use bit.
+ // See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go + // for how file locking is done by the go tool on Plan 9. + return nil +} + +func (f *file) Unlock() error { + return nil +} + +func rename(from, to string) error { + // If from and to are in different directories, copy the file + // since Plan 9 does not support cross-directory rename. + if filepath.Dir(from) != filepath.Dir(to) { + fi, err := os.Stat(from) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + if fi.Mode().IsDir() { + return &os.LinkError{"rename", from, to, syscall.EISDIR} + } + fromFile, err := os.Open(from) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + _, err = io.Copy(toFile, fromFile) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + + // Copy mtime and mode from original file. + // We need only one syscall if we avoid os.Chmod and os.Chtimes. + dir := fi.Sys().(*syscall.Dir) + var d syscall.Dir + d.Null() + d.Mtime = dir.Mtime + d.Mode = dir.Mode + if err = dirwstat(to, &d); err != nil { + return &os.LinkError{"rename", from, to, err} + } + + // Remove original file. + err = os.Remove(from) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + return nil + } + return os.Rename(from, to) +} + +func dirwstat(name string, d *syscall.Dir) error { + var buf [syscall.STATFIXLEN]byte + + n, err := d.Marshal(buf[:]) + if err != nil { + return &os.PathError{"dirwstat", name, err} + } + if err = syscall.Wstat(name, buf[:n]); err != nil { + return &os.PathError{"dirwstat", name, err} + } + return nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go new file mode 100644 index 0000000000..c74d60ee6b --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go @@ -0,0 +1,27 @@ +// +build !plan9,!windows,!js + +package osfs + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func (f *file) Lock() error { + f.m.Lock() + defer f.m.Unlock() + + return unix.Flock(int(f.File.Fd()), unix.LOCK_EX) +} + +func (f *file) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + return unix.Flock(int(f.File.Fd()), unix.LOCK_UN) +} + +func rename(from, to string) error { + return os.Rename(from, to) +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go new file mode 100644 index 0000000000..8f5caeb0ed --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go @@ -0,0 +1,61 @@ +// +build windows + +package osfs + +import ( + "os" + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +type fileInfo struct { + os.FileInfo + name string +} + +func (fi *fileInfo) Name() string { + return fi.name +} + +var ( + kernel32DLL = windows.NewLazySystemDLL("kernel32.dll") + lockFileExProc = kernel32DLL.NewProc("LockFileEx") + unlockFileProc = kernel32DLL.NewProc("UnlockFile") +) + +const ( + lockfileExclusiveLock = 0x2 +) + +func (f *file) Lock() error { + f.m.Lock() + defer f.m.Unlock() + + var overlapped windows.Overlapped + // err is always non-nil as per sys/windows semantics. 
+ ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0, + uintptr(unsafe.Pointer(&overlapped))) + runtime.KeepAlive(&overlapped) + if ret == 0 { + return err + } + return nil +} + +func (f *file) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // err is always non-nil as per sys/windows semantics. + ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0) + if ret == 0 { + return err + } + return nil +} + +func rename(from, to string) error { + return os.Rename(from, to) +} diff --git a/vendor/github.com/go-git/go-billy/v5/util/glob.go b/vendor/github.com/go-git/go-billy/v5/util/glob.go new file mode 100644 index 0000000000..f7cb1de896 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/util/glob.go @@ -0,0 +1,111 @@ +package util + +import ( + "path/filepath" + "sort" + "strings" + + "github.com/go-git/go-billy/v5" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// Function originally from https://golang.org/src/path/filepath/match_test.go +func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + if _, err = fs.Lstat(pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + // Prevent infinite recursion. See issue 15879. + if dir == pattern { + return nil, filepath.ErrBadPattern + } + + var m []string + m, err = Glob(fs, cleanGlobPath(dir)) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// cleanGlobPath prepares path for glob matching. +func cleanGlobPath(path string) string { + switch path { + case "": + return "." + case string(filepath.Separator): + // do nothing to the path + return path + default: + return path[0 : len(path)-1] // chop off trailing separator + } +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + + if !fi.IsDir() { + return + } + + names, _ := readdirnames(fs, dir) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? 
+ return strings.ContainsAny(path, "*?[") +} + +func readdirnames(fs billy.Filesystem, dir string) ([]string, error) { + files, err := fs.ReadDir(dir) + if err != nil { + return nil, err + } + + var names []string + for _, file := range files { + names = append(names, file.Name()) + } + + return names, nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/util/util.go b/vendor/github.com/go-git/go-billy/v5/util/util.go new file mode 100644 index 0000000000..5c77128c3c --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/util/util.go @@ -0,0 +1,282 @@ +package util + +import ( + "io" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "github.com/go-git/go-billy/v5" +) + +// RemoveAll removes path and any children it contains. It removes everything it +// can but returns the first error it encounters. If the path does not exist, +// RemoveAll returns nil (no error). +func RemoveAll(fs billy.Basic, path string) error { + fs, path = getUnderlyingAndPath(fs, path) + + if r, ok := fs.(removerAll); ok { + return r.RemoveAll(path) + } + + return removeAll(fs, path) +} + +type removerAll interface { + RemoveAll(string) error +} + +func removeAll(fs billy.Basic, path string) error { + // This implementation is adapted from os.RemoveAll. + + // Simple case: if Remove works, we're done. + err := fs.Remove(path) + if err == nil || os.IsNotExist(err) { + return nil + } + + // Otherwise, is this a directory we need to recurse into? + dir, serr := fs.Stat(path) + if serr != nil { + if os.IsNotExist(serr) { + return nil + } + + return serr + } + + if !dir.IsDir() { + // Not a directory; return the error from Remove. + return err + } + + dirfs, ok := fs.(billy.Dir) + if !ok { + return billy.ErrNotSupported + } + + // Directory. + fis, err := dirfs.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + + return err + } + + // Remove contents & return first error. + err = nil + for _, fi := range fis { + cpath := fs.Join(path, fi.Name()) + err1 := removeAll(fs, cpath) + if err == nil { + err = err1 + } + } + + // Remove directory. + err1 := fs.Remove(path) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + + if err == nil { + err = err1 + } + + return err + +} + +// WriteFile writes data to a file named by filename in the given filesystem. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + + if err1 := f.Close(); err == nil { + err = err1 + } + + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. 
+var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir with a name +// beginning with prefix, opens the file for reading and writing, and returns +// the resulting *os.File. If dir is the empty string, TempFile uses the default +// directory for temporary files (see os.TempDir). Multiple programs calling +// TempFile simultaneously will not choose the same file. The caller can use +// f.Name() to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { + // This implementation is based on stdlib ioutil.TempFile. + if dir == "" { + dir = getTempDir(fs) + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { + // This implementation is based on stdlib ioutil.TempDir + + if dir == "" { + dir = getTempDir(fs.(billy.Basic)) + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.MkdirAll(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if os.IsNotExist(err) { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return "", err + } + } + if err == nil { + name = try + } + break + } + return +} + +func getTempDir(fs billy.Basic) string { + ch, ok := fs.(billy.Chroot) + if !ok || ch.Root() == "" || ch.Root() == "/" || ch.Root() == string(filepath.Separator) { + return os.TempDir() + } + + return ".tmp" +} + +type underlying interface { + Underlying() billy.Basic +} + +func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) { + u, ok := fs.(underlying) + if !ok { + return fs, path + } + if ch, ok := fs.(billy.Chroot); ok { + path = fs.Join(ch.Root(), path) + } + + return u.Underlying(), path +} + +// ReadFile reads the named file and returns the contents from the given filesystem. +// A successful call returns err == nil, not err == EOF. +// Because ReadFile reads the whole file, it does not treat an EOF from Read +// as an error to be reported. 
+func ReadFile(fs billy.Basic, name string) ([]byte, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + + defer f.Close() + + var size int + if info, err := fs.Stat(name); err == nil { + size64 := info.Size() + if int64(int(size64)) == size64 { + size = int(size64) + } + } + + size++ // one byte for final read at EOF + // If a file claims a small size, read at least 512 bytes. + // In particular, files in Linux's /proc claim size 0 but + // then do not work right if read in small pieces, + // so an initial read of 1 byte would not work correctly. + + if size < 512 { + size = 512 + } + + data := make([]byte, 0, size) + for { + if len(data) >= cap(data) { + d := append(data[:cap(data)], 0) + data = d[:len(data)] + } + + n, err := f.Read(data[len(data):cap(data)]) + data = data[:len(data)+n] + + if err != nil { + if err == io.EOF { + err = nil + } + + return data, err + } + } +} diff --git a/vendor/github.com/go-git/go-git/v5/.gitignore b/vendor/github.com/go-git/go-git/v5/.gitignore new file mode 100644 index 0000000000..038dd9f1ed --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/.gitignore @@ -0,0 +1,4 @@ +coverage.out +*~ +coverage.txt +profile.out diff --git a/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md b/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..a689fa3c34 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at conduct@sourced.tech. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md new file mode 100644 index 0000000000..2a72b501e2 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -0,0 +1,111 @@ +Supported Capabilities +====================== + +Here is a non-comprehensive table of git commands and features whose equivalent +is supported by go-git. + +| Feature | Status | Notes | +|---------------------------------------|--------|-------| +| **config** | +| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | +| **getting and creating repositories** | +| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | +| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | +| **basic snapshotting** | +| add | ✔ | Plain add is supported. Any other flags aren't supported | +| status | ✔ | +| commit | ✔ | +| reset | ✔ | +| rm | ✔ | +| mv | ✔ | +| **branching and merging** | +| branch | ✔ | +| checkout | ✔ | Basic usages of checkout are supported. | +| merge | ✖ | +| mergetool | ✖ | +| stash | ✖ | +| tag | ✔ | +| **sharing and updating projects** | +| fetch | ✔ | +| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. 
| +| push | ✔ | +| remote | ✔ | +| submodule | ✔ | +| **inspection and comparison** | +| show | ✔ | +| log | ✔ | +| shortlog | (see log) | +| describe | | +| **patching** | +| apply | ✖ | +| cherry-pick | ✖ | +| diff | ✔ | Patch object with UnifiedDiff output representation | +| rebase | ✖ | +| revert | ✖ | +| **debugging** | +| bisect | ✖ | +| blame | ✔ | +| grep | ✔ | +| **email** || +| am | ✖ | +| apply | ✖ | +| format-patch | ✖ | +| send-email | ✖ | +| request-pull | ✖ | +| **external systems** | +| svn | ✖ | +| fast-import | ✖ | +| **administration** | +| clean | ✔ | +| gc | ✖ | +| fsck | ✖ | +| reflog | ✖ | +| filter-branch | ✖ | +| instaweb | ✖ | +| archive | ✖ | +| bundle | ✖ | +| prune | ✖ | +| repack | ✖ | +| **server admin** | +| daemon | | +| update-server-info | | +| **advanced** | +| notes | ✖ | +| replace | ✖ | +| worktree | ✖ | +| annotate | (see blame) | +| **gpg** | +| git-verify-commit | ✔ | +| git-verify-tag | ✔ | +| **plumbing commands** | +| cat-file | ✔ | +| check-ignore | | +| commit-tree | | +| count-objects | | +| diff-index | | +| for-each-ref | ✔ | +| hash-object | ✔ | +| ls-files | ✔ | +| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. | +| read-tree | | +| rev-list | ✔ | +| rev-parse | | +| show-ref | ✔ | +| symbolic-ref | ✔ | +| update-index | | +| update-ref | | +| verify-pack | | +| write-tree | | +| **protocols** | +| http(s):// (dumb) | ✖ | +| http(s):// (smart) | ✔ | +| git:// | ✔ | +| ssh:// | ✔ | +| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. | +| custom | ✔ | +| **other features** | +| gitignore | ✔ | +| gitattributes | ✖ | +| index version | | +| packfile version | | +| push-certs | ✖ | diff --git a/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md new file mode 100644 index 0000000000..fce25328a7 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Contributing Guidelines + +source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts +contributions via GitHub pull requests. This document outlines some of the +conventions on development workflow, commit message formatting, contact points, +and other resources to make it easier to get your contribution accepted. + +## Support Channels + +The official support channels, for both users and contributors, are: + +- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions. +- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests. + +*Before opening a new issue or submitting a new pull request, it's helpful to +search the project - it's likely that another user has already reported the +issue you're facing, or it's a known issue that we're already aware of. + + +## How to Contribute + +Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project. +In order for a PR to be accepted it needs to pass a list of requirements: + +- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation. +- The expected behavior must match the [official git implementation](https://github.com/git/git). +- The actual behavior must be correctly explained with natural language and providing a minimum working example in Go that reproduces it. 
+- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
+- They should in general include tests, and those shall pass.
+- If the PR is a bug fix, it has to include a suite of unit tests for the new functionality.
+- If the PR is a new feature, it has to come with a suite of unit tests that test the new functionality.
+- In any case, all PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
+
+### Format of the commit message
+
+Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
+
+```
+plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
+```
+
+The format can be described more formally as follows:
+
+```
+<package>: <subpackage>, <what changed>. [Fixes #<issue-number>]
+```
diff --git a/vendor/github.com/go-git/go-git/v5/LICENSE b/vendor/github.com/go-git/go-git/v5/LICENSE
new file mode 100644
index 0000000000..8aa3d854cf
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Sourced Technologies, S.L. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile new file mode 100644 index 0000000000..d10922fb10 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/Makefile @@ -0,0 +1,38 @@ +# General +WORKDIR = $(PWD) + +# Go parameters +GOCMD = go +GOTEST = $(GOCMD) test + +# Git config +GIT_VERSION ?= +GIT_DIST_PATH ?= $(PWD)/.git-dist +GIT_REPOSITORY = http://github.com/git/git.git + +# Coverage +COVERAGE_REPORT = coverage.out +COVERAGE_MODE = count + +build-git: + @if [ -f $(GIT_DIST_PATH)/git ]; then \ + echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ + else \ + git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ + cd $(GIT_DIST_PATH); \ + make configure; \ + ./configure; \ + make all; \ + fi + +test: + @echo "running against `git version`"; \ + $(GOTEST) ./... 
+
+test-coverage:
+	@echo "running against `git version`"; \
+	echo "" > $(COVERAGE_REPORT); \
+	$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
+
+clean:
+	rm -rf $(GIT_DIST_PATH)
\ No newline at end of file
diff --git a/vendor/github.com/go-git/go-git/v5/README.md b/vendor/github.com/go-git/go-git/v5/README.md
new file mode 100644
index 0000000000..ff0c9b72ba
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/README.md
@@ -0,0 +1,131 @@
+![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
+[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
+
+*go-git* is a highly extensible git implementation library written in **pure Go**.
+
+It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
+
+It has been actively developed since 2015 and is used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/), and [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), as well as by many other libraries and tools.
+
+Project Status
+--------------
+
+After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of updates for four months and the requirement to make a hard fork, the project is **now back to normality**.
+
+The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale.
+
+
+Comparison with git
+-------------------
+
+*go-git* aims to be fully compatible with [git](https://github.com/git/git); all the *porcelain* operations are implemented to work exactly as *git* does.
+
+*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
+
+
+Installation
+------------
+
+The recommended way to install *go-git* is:
+
+```go
+import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
+import "github.com/go-git/go-git" // with go modules disabled
+```
+
+
+Examples
+--------
+
+> Please note that the `CheckIfError` and `Info` functions used below are helpers from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19), provided only to keep the examples short.
+
+
+### Basic example
+
+A basic example that mimics the standard `git clone` command:
+
+```go
+// Clone the given repository to the given directory
+Info("git clone https://github.com/go-git/go-git")
+
+_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{
+	URL:      "https://github.com/go-git/go-git",
+	Progress: os.Stdout,
+})
+
+CheckIfError(err)
+```
+
+Outputs:
+```
+Counting objects: 4924, done.
+Compressing objects: 100% (1333/1333), done.
+Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533
+```
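Since the clone above lands at `/tmp/foo`, a complementary sketch (illustrative, not from the upstream README; the path is the one used in the basic example) shows how an existing on-disk repository is reopened:

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	// Reopen the repository cloned to /tmp/foo in the basic example.
	r, err := git.PlainOpen("/tmp/foo")
	if err != nil {
		panic(err)
	}

	ref, err := r.Head()
	if err != nil {
		panic(err)
	}
	fmt.Println("HEAD is at", ref.Hash())
}
```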
+### In-memory example
+
+Cloning a repository into memory and printing the history of HEAD, just like `git log` does:
+
+
+```go
+// Clones the given repository in memory, creating the remote, the local
+// branches and fetching the objects, exactly as:
+Info("git clone https://github.com/go-git/go-billy")
+
+r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
+	URL: "https://github.com/go-git/go-billy",
+})
+
+CheckIfError(err)
+
+// Gets the commit history from HEAD, just like this command:
+Info("git log")
+
+// ... retrieves the branch pointed to by HEAD
+ref, err := r.Head()
+CheckIfError(err)
+
+
+// ... retrieves the commit history
+cIter, err := r.Log(&git.LogOptions{From: ref.Hash()})
+CheckIfError(err)
+
+// ... just iterates over the commits, printing them
+err = cIter.ForEach(func(c *object.Commit) error {
+	fmt.Println(c)
+	return nil
+})
+CheckIfError(err)
+```
+
+Outputs:
+```
+commit ded8054fd0c3994453e9c8aacaf48d118d42991e
+Author: Santiago M. Mola
+Date:   Sat Nov 12 21:18:41 2016 +0100
+
+    index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9)
+
+commit df707095626f384ce2dc1a83b30f9a21d69b9dfc
+Author: Santiago M. Mola
+Date:   Fri Nov 11 13:23:22 2016 +0100
+
+    readwriter: fix bug when writing index. (#10)
+
+    When using ReadWriter on an existing siva file, absolute offset for
+    index entries was not being calculated correctly.
+...
+```
+
+You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
+
+Contribute
+----------
+
+[Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome; if you are interested, please take a look at our [Contributing Guidelines](CONTRIBUTING.md).
+
+License
+-------
+Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/vendor/github.com/go-git/go-git/v5/blame.go b/vendor/github.com/go-git/go-git/v5/blame.go
new file mode 100644
index 0000000000..43634b32ca
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/blame.go
@@ -0,0 +1,302 @@
+package git
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/object"
+	"github.com/go-git/go-git/v5/utils/diff"
+)
+
+// BlameResult represents the result of a Blame operation.
+type BlameResult struct {
+	// Path is the path of the File that we're blaming.
+	Path string
+	// Rev (Revision) is the hash of the specified Commit used to generate this result.
+	Rev plumbing.Hash
+	// Lines contains every line with its authorship.
+	Lines []*Line
+}
+
+// Blame returns a BlameResult with the information about the last author of
+// each line from file `path` at commit `c`.
+func Blame(c *object.Commit, path string) (*BlameResult, error) {
+	// The file to blame is identified by the input arguments:
+	// commit and path. commit is a Commit object obtained from a Repository. Path
+	// represents a path to a specific file contained in the repository.
+	//
+	// Blaming a file is a two step process:
+	//
+	// 1. Create a linear history of the commits affecting a file. We use
+	// revlist.New for that.
+	//
+	// 2. Then build a graph with a node for every line in every file in
+	// the history of the file.
+	//
+	// Each node is assigned a commit: Start with the nodes in the first
+	// commit.
Assign that commit as the creator of all its lines. + // + // Then jump to the nodes in the next commit, and calculate the diff + // between the two files. Newly created lines get + // assigned the new commit as its origin. Modified lines also get + // this new commit. Untouched lines retain the old commit. + // + // All this work is done in the assignOrigin function which holds all + // the internal relevant data in a "blame" struct, that is not + // exported. + // + // TODO: ways to improve the efficiency of this function: + // 1. Improve revlist + // 2. Improve how to traverse the history (example a backward traversal will + // be much more efficient) + // + // TODO: ways to improve the function in general: + // 1. Add memoization between revlist and assign. + // 2. It is using much more memory than needed, see the TODOs below. + + b := new(blame) + b.fRev = c + b.path = path + + // get all the file revisions + if err := b.fillRevs(); err != nil { + return nil, err + } + + // calculate the line tracking graph and fill in + // file contents in data. + if err := b.fillGraphAndData(); err != nil { + return nil, err + } + + file, err := b.fRev.File(b.path) + if err != nil { + return nil, err + } + finalLines, err := file.Lines() + if err != nil { + return nil, err + } + + // Each node (line) holds the commit where it was introduced or + // last modified. To achieve that we use the FORWARD algorithm + // described in Zimmermann, et al. "Mining Version Archives for + // Co-changed Lines", in proceedings of the Mining Software + // Repositories workshop, Shanghai, May 22-23, 2006. + lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) + if err != nil { + return nil, err + } + + return &BlameResult{ + Path: path, + Rev: c.Hash, + Lines: lines, + }, nil +} + +// Line values represent the contents and author of a line in BlamedResult values. +type Line struct { + // Author is the email address of the last author that modified the line. + Author string + // Text is the original text of the line. + Text string + // Date is when the original text of the line was introduced + Date time.Time + // Hash is the commit hash that introduced the original line + Hash plumbing.Hash +} + +func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { + return &Line{ + Author: author, + Text: text, + Hash: hash, + Date: date, + } +} + +func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { + lcontents := len(contents) + lcommits := len(commits) + + if lcontents != lcommits { + if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { + contents = append(contents, "\n") + } else { + return nil, errors.New("contents and commits have different length") + } + } + + result := make([]*Line, 0, lcontents) + for i := range contents { + result = append(result, newLine( + commits[i].Author.Email, contents[i], + commits[i].Author.When, commits[i].Hash, + )) + } + + return result, nil +} + +// this struct is internally used by the blame function to hold its +// inputs, outputs and state. 
+type blame struct {
+	// the path of the file to blame
+	path string
+	// the commit of the final revision of the file to blame
+	fRev *object.Commit
+	// the chain of revisions affecting the file to blame
+	revs []*object.Commit
+	// the contents of the file across all its revisions
+	data []string
+	// the graph of the lines in the file across all the revisions
+	graph [][]*object.Commit
+}
+
+// calculate the history of the file "path", starting from the blamed
+// commit, sorted by commit date.
+func (b *blame) fillRevs() error {
+	var err error
+
+	b.revs, err = references(b.fRev, b.path)
+	return err
+}
+
+// build graph of a file from its revision history
+func (b *blame) fillGraphAndData() error {
+	//TODO: not all commits are needed, only the current rev and the prev
+	b.graph = make([][]*object.Commit, len(b.revs))
+	b.data = make([]string, len(b.revs)) // file contents in all the revisions
+	// for every revision of the file, starting with the first
+	// one...
+	for i, rev := range b.revs {
+		// get the contents of the file
+		file, err := rev.File(b.path)
+		if err != nil {
+			// propagate the error instead of silently returning success
+			return err
+		}
+		b.data[i], err = file.Contents()
+		if err != nil {
+			return err
+		}
+		nLines := countLines(b.data[i])
+		// create a node for each line
+		b.graph[i] = make([]*object.Commit, nLines)
+		// assign a commit to each node
+		// if this is the first revision, then the node is assigned to
+		// this first commit.
+		if i == 0 {
+			for j := 0; j < nLines; j++ {
+				b.graph[i][j] = b.revs[i]
+			}
+		} else {
+			// if this is not the first commit, then assign to the old
+			// commit or to the new one, depending on what the diff
+			// says.
+			b.assignOrigin(i, i-1)
+		}
+	}
+	return nil
+}
+
+// sliceGraph returns a slice of commits (one per line) for a particular
+// revision of a file (0=first revision).
+func (b *blame) sliceGraph(i int) []*object.Commit {
+	fVs := b.graph[i]
+	result := make([]*object.Commit, 0, len(fVs))
+	for _, v := range fVs {
+		c := *v
+		result = append(result, &c)
+	}
+	return result
+}
+
+// Assigns origin to vertexes in current (c) rev from data in its previous (p)
+// revision
+func (b *blame) assignOrigin(c, p int) {
+	// assign origin based on diff info
+	hunks := diff.Do(b.data[p], b.data[c])
+	sl := -1 // source line
+	dl := -1 // destination line
+	for h := range hunks {
+		hLines := countLines(hunks[h].Text)
+		for hl := 0; hl < hLines; hl++ {
+			switch {
+			case hunks[h].Type == 0:
+				sl++
+				dl++
+				b.graph[c][dl] = b.graph[p][sl]
+			case hunks[h].Type == 1:
+				dl++
+				b.graph[c][dl] = b.revs[c]
+			case hunks[h].Type == -1:
+				sl++
+			default:
+				panic("unreachable")
+			}
+		}
+	}
+}
+
+// GoString prints the results of a Blame using git-blame's style.
+func (b *blame) GoString() string {
+	var buf bytes.Buffer
+
+	file, err := b.fRev.File(b.path)
+	if err != nil {
+		panic("PrettyPrint: internal error in repo.Data")
+	}
+	contents, err := file.Contents()
+	if err != nil {
+		panic("PrettyPrint: internal error in repo.Data")
+	}
+
+	lines := strings.Split(contents, "\n")
+	// max line number length
+	mlnl := len(strconv.Itoa(len(lines)))
+	// max author length
+	mal := b.maxAuthorLength()
+	format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
+		mal, mlnl)
+
+	fVs := b.graph[len(b.graph)-1]
+	for ln, v := range fVs {
+		fmt.Fprintf(&buf, format, v.Hash.String()[:8],
+			prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
+	}
+	return buf.String()
+}
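For orientation while reviewing this vendored file, a minimal sketch of how the exported `Blame` API above is typically driven (the repository URL and file path are illustrative, not part of this change):

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/storage/memory"
)

func main() {
	// Clone a small repository into memory.
	r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL: "https://github.com/go-git/go-billy",
	})
	if err != nil {
		panic(err)
	}

	ref, err := r.Head()
	if err != nil {
		panic(err)
	}
	c, err := r.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	// Blame resolves the last commit that touched every line of the file.
	result, err := git.Blame(c, "README.md")
	if err != nil {
		panic(err)
	}
	for i, line := range result.Lines {
		fmt.Printf("%4d %s %s\n", i+1, line.Hash.String()[:8], line.Author)
	}
}
```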
+// GoString prints the results of a Blame using git-blame's style. +func (b *blame) GoString() string { + var buf bytes.Buffer + + file, err := b.fRev.File(b.path) + if err != nil { + panic("PrettyPrint: internal error in repo.Data") + } + contents, err := file.Contents() + if err != nil { + panic("PrettyPrint: internal error in repo.Data") + } + + lines := strings.Split(contents, "\n") + // max line number length + mlnl := len(strconv.Itoa(len(lines))) + // max author length + mal := b.maxAuthorLength() + format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", + mal, mlnl) + + fVs := b.graph[len(b.graph)-1] + for ln, v := range fVs { + fmt.Fprintf(&buf, format, v.Hash.String()[:8], + prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) + } + return buf.String() +} + +// utility function to pretty print the author. +func prettyPrintAuthor(c *object.Commit) string { + return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) +} + +// utility function to calculate the number of runes needed +// to print the longest author name in the blame of a file. +func (b *blame) maxAuthorLength() int { + memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1) + fVs := b.graph[len(b.graph)-1] + m := 0 + for ln := range fVs { + if _, ok := memo[fVs[ln].Hash]; ok { + continue + } + memo[fVs[ln].Hash] = struct{}{} + m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln]))) + } + return m +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/go-git/go-git/v5/common.go b/vendor/github.com/go-git/go-git/v5/common.go new file mode 100644 index 0000000000..f837a2654c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/common.go @@ -0,0 +1,22 @@ +package git + +import "strings" + +const defaultDotGitPath = ".git" + +// countLines returns the number of lines in a string à la git. The newline +// character is assumed to be '\n'. The empty string contains 0 lines. If the +// last line of the string doesn't end with a newline, it is still counted as +// a line. +func countLines(s string) int { + if s == "" { + return 0 + } + + nEOL := strings.Count(s, "\n") + if strings.HasSuffix(s, "\n") { + return nEOL + } + + return nEOL + 1 +} diff --git a/vendor/github.com/go-git/go-git/v5/config/branch.go b/vendor/github.com/go-git/go-git/v5/config/branch.go new file mode 100644 index 0000000000..fe86cf542c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/branch.go @@ -0,0 +1,90 @@ +package config + +import ( + "errors" + + "github.com/go-git/go-git/v5/plumbing" + format "github.com/go-git/go-git/v5/plumbing/format/config" +) + +var ( + errBranchEmptyName = errors.New("branch config: empty name") + errBranchInvalidMerge = errors.New("branch config: invalid merge") + errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'") +) + +// Branch contains information on the +// local branches and which remote to track +type Branch struct { + // Name of branch + Name string + // Remote name of remote to track + Remote string + // Merge is the local refspec for the branch + Merge plumbing.ReferenceName + // Rebase instead of merge when pulling. Valid values are + // "true" and "interactive".
"false" is undocumented and + // typically represented by the non-existence of this field + Rebase string + + raw *format.Subsection +} + +// Validate validates fields of branch +func (b *Branch) Validate() error { + if b.Name == "" { + return errBranchEmptyName + } + + if b.Merge != "" && !b.Merge.IsBranch() { + return errBranchInvalidMerge + } + + if b.Rebase != "" && + b.Rebase != "true" && + b.Rebase != "interactive" && + b.Rebase != "false" { + return errBranchInvalidRebase + } + + return nil +} + +func (b *Branch) marshal() *format.Subsection { + if b.raw == nil { + b.raw = &format.Subsection{} + } + + b.raw.Name = b.Name + + if b.Remote == "" { + b.raw.RemoveOption(remoteSection) + } else { + b.raw.SetOption(remoteSection, b.Remote) + } + + if b.Merge == "" { + b.raw.RemoveOption(mergeKey) + } else { + b.raw.SetOption(mergeKey, string(b.Merge)) + } + + if b.Rebase == "" { + b.raw.RemoveOption(rebaseKey) + } else { + b.raw.SetOption(rebaseKey, b.Rebase) + } + + return b.raw +} + +func (b *Branch) unmarshal(s *format.Subsection) error { + b.raw = s + + b.Name = b.raw.Name + b.Remote = b.raw.Options.Get(remoteSection) + b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) + b.Rebase = b.raw.Options.Get(rebaseKey) + + return b.Validate() +} diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go new file mode 100644 index 0000000000..7d6ab5886b --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/config.go @@ -0,0 +1,564 @@ +// Package config contains the abstraction of multiple config files +package config + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + + "github.com/go-git/go-git/v5/internal/url" + format "github.com/go-git/go-git/v5/plumbing/format/config" + "github.com/mitchellh/go-homedir" +) + +const ( + // DefaultFetchRefSpec is the default refspec used for fetch. + DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" + // DefaultPushRefSpec is the default refspec used for push. + DefaultPushRefSpec = "refs/heads/*:refs/heads/*" +) + +// ConfigStorer generic storage of Config object +type ConfigStorer interface { + Config() (*Config, error) + SetConfig(*Config) error +} + +var ( + ErrInvalid = errors.New("config invalid key in remote or branch") + ErrRemoteConfigNotFound = errors.New("remote config not found") + ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL") + ErrRemoteConfigEmptyName = errors.New("remote config: empty name") +) + +// Scope defines the scope of a config file, such as local, global or system. +type Scope int + +// Available ConfigScope's +const ( + LocalScope Scope = iota + GlobalScope + SystemScope +) + +// Config contains the repository configuration +// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES +type Config struct { + Core struct { + // IsBare if true this repository is assumed to be bare and has no + // working directory associated with it. + IsBare bool + // Worktree is the path to the root of the working tree. + Worktree string + // CommentChar is the character indicating the start of a + // comment for commands like commit and tag + CommentChar string + } + + User struct { + // Name is the personal name of the author and the commiter of a commit. + Name string + // Email is the email of the author and the commiter of a commit. + Email string + } + + Author struct { + // Name is the personal name of the author of a commit. 
+ Name string + // Email is the email of the author of a commit. + Email string + } + + Committer struct { + // Name is the personal name of the committer of a commit. + Name string + // Email is the email of the committer of a commit. + Email string + } + + Pack struct { + // Window controls the size of the sliding window for delta + // compression. The default is 10. A value of 0 turns off + // delta compression entirely. + Window uint + } + + // Remotes list of repository remotes, the key of the map is the name + // of the remote, and should equal RemoteConfig.Name. + Remotes map[string]*RemoteConfig + // Submodules list of repository submodules, the key of the map is the name + // of the submodule, and should equal Submodule.Name. + Submodules map[string]*Submodule + // Branches list of branches, the key is the branch name and should + // equal Branch.Name + Branches map[string]*Branch + // Raw contains the raw information of a config file. The main goal is + // preserve the parsed information from the original format, to avoid + // dropping unsupported fields. + Raw *format.Config +} + +// NewConfig returns a new empty Config. +func NewConfig() *Config { + config := &Config{ + Remotes: make(map[string]*RemoteConfig), + Submodules: make(map[string]*Submodule), + Branches: make(map[string]*Branch), + Raw: format.New(), + } + + config.Pack.Window = DefaultPackWindow + + return config +} + +// ReadConfig reads a config file from an io.Reader. +func ReadConfig(r io.Reader) (*Config, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + cfg := NewConfig() + if err = cfg.Unmarshal(b); err != nil { + return nil, err + } + + return cfg, nil +} + +// LoadConfig loads a config file from a given scope. The returned Config +// contains exclusively information from the given scope. If no config file +// is found for the given scope, an empty one is returned. +func LoadConfig(scope Scope) (*Config, error) { + if scope == LocalScope { + return nil, fmt.Errorf("LocalScope should be read from a ConfigStorer.") + } + + files, err := Paths(scope) + if err != nil { + return nil, err + } + + for _, file := range files { + f, err := os.Open(file) + if err != nil { + if os.IsNotExist(err) { + continue + } + + return nil, err + } + + defer f.Close() + return ReadConfig(f) + } + + return NewConfig(), nil +} + +// Paths returns the config file location for a given scope. +func Paths(scope Scope) ([]string, error) { + var files []string + switch scope { + case GlobalScope: + xdg := os.Getenv("XDG_CONFIG_HOME") + if xdg != "" { + files = append(files, filepath.Join(xdg, "git/config")) + } + + home, err := homedir.Dir() + if err != nil { + return nil, err + } + + files = append(files, + filepath.Join(home, ".gitconfig"), + filepath.Join(home, ".config/git/config"), + ) + case SystemScope: + files = append(files, "/etc/gitconfig") + } + + return files, nil +} + +// Validate validates the fields and sets the default values.
+func (c *Config) Validate() error { + for name, r := range c.Remotes { + if r.Name != name { + return ErrInvalid + } + + if err := r.Validate(); err != nil { + return err + } + } + + for name, b := range c.Branches { + if b.Name != name { + return ErrInvalid + } + + if err := b.Validate(); err != nil { + return err + } + } + + return nil +} + +const ( + remoteSection = "remote" + submoduleSection = "submodule" + branchSection = "branch" + coreSection = "core" + packSection = "pack" + userSection = "user" + authorSection = "author" + committerSection = "committer" + fetchKey = "fetch" + urlKey = "url" + bareKey = "bare" + worktreeKey = "worktree" + commentCharKey = "commentChar" + windowKey = "window" + mergeKey = "merge" + rebaseKey = "rebase" + nameKey = "name" + emailKey = "email" + + // DefaultPackWindow holds the number of previous objects used to + // generate deltas. The value 10 is the same used by git command. + DefaultPackWindow = uint(10) +) + +// Unmarshal parses a git-config file and stores it. +func (c *Config) Unmarshal(b []byte) error { + r := bytes.NewBuffer(b) + d := format.NewDecoder(r) + + c.Raw = format.New() + if err := d.Decode(c.Raw); err != nil { + return err + } + + c.unmarshalCore() + c.unmarshalUser() + if err := c.unmarshalPack(); err != nil { + return err + } + unmarshalSubmodules(c.Raw, c.Submodules) + + if err := c.unmarshalBranches(); err != nil { + return err + } + + return c.unmarshalRemotes() +} + +func (c *Config) unmarshalCore() { + s := c.Raw.Section(coreSection) + if s.Options.Get(bareKey) == "true" { + c.Core.IsBare = true + } + + c.Core.Worktree = s.Options.Get(worktreeKey) + c.Core.CommentChar = s.Options.Get(commentCharKey) +} + +func (c *Config) unmarshalUser() { + s := c.Raw.Section(userSection) + c.User.Name = s.Options.Get(nameKey) + c.User.Email = s.Options.Get(emailKey) + + s = c.Raw.Section(authorSection) + c.Author.Name = s.Options.Get(nameKey) + c.Author.Email = s.Options.Get(emailKey) + + s = c.Raw.Section(committerSection) + c.Committer.Name = s.Options.Get(nameKey) + c.Committer.Email = s.Options.Get(emailKey) +} + +func (c *Config) unmarshalPack() error { + s := c.Raw.Section(packSection) + window := s.Options.Get(windowKey) + if window == "" { + c.Pack.Window = DefaultPackWindow + } else { + winUint, err := strconv.ParseUint(window, 10, 32) + if err != nil { + return err + } + c.Pack.Window = uint(winUint) + } + return nil +} + +func (c *Config) unmarshalRemotes() error { + s := c.Raw.Section(remoteSection) + for _, sub := range s.Subsections { + r := &RemoteConfig{} + if err := r.unmarshal(sub); err != nil { + return err + } + + c.Remotes[r.Name] = r + } + + return nil +} + +func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) { + s := fc.Section(submoduleSection) + for _, sub := range s.Subsections { + m := &Submodule{} + m.unmarshal(sub) + + if m.Validate() == ErrModuleBadPath { + continue + } + + submodules[m.Name] = m + } +} + +func (c *Config) unmarshalBranches() error { + bs := c.Raw.Section(branchSection) + for _, sub := range bs.Subsections { + b := &Branch{} + + if err := b.unmarshal(sub); err != nil { + return err + } + + c.Branches[b.Name] = b + } + return nil +} + +// Marshal returns Config encoded as a git-config file. 
+func (c *Config) Marshal() ([]byte, error) { + c.marshalCore() + c.marshalUser() + c.marshalPack() + c.marshalRemotes() + c.marshalSubmodules() + c.marshalBranches() + + buf := bytes.NewBuffer(nil) + if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Config) marshalCore() { + s := c.Raw.Section(coreSection) + s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) + + if c.Core.Worktree != "" { + s.SetOption(worktreeKey, c.Core.Worktree) + } +} + +func (c *Config) marshalUser() { + s := c.Raw.Section(userSection) + if c.User.Name != "" { + s.SetOption(nameKey, c.User.Name) + } + + if c.User.Email != "" { + s.SetOption(emailKey, c.User.Email) + } + + s = c.Raw.Section(authorSection) + if c.Author.Name != "" { + s.SetOption(nameKey, c.Author.Name) + } + + if c.Author.Email != "" { + s.SetOption(emailKey, c.Author.Email) + } + + s = c.Raw.Section(committerSection) + if c.Committer.Name != "" { + s.SetOption(nameKey, c.Committer.Name) + } + + if c.Committer.Email != "" { + s.SetOption(emailKey, c.Committer.Email) + } +} + +func (c *Config) marshalPack() { + s := c.Raw.Section(packSection) + if c.Pack.Window != DefaultPackWindow { + s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window)) + } +} + +func (c *Config) marshalRemotes() { + s := c.Raw.Section(remoteSection) + newSubsections := make(format.Subsections, 0, len(c.Remotes)) + added := make(map[string]bool) + for _, subsection := range s.Subsections { + if remote, ok := c.Remotes[subsection.Name]; ok { + newSubsections = append(newSubsections, remote.marshal()) + added[subsection.Name] = true + } + } + + remoteNames := make([]string, 0, len(c.Remotes)) + for name := range c.Remotes { + remoteNames = append(remoteNames, name) + } + + sort.Strings(remoteNames) + + for _, name := range remoteNames { + if !added[name] { + newSubsections = append(newSubsections, c.Remotes[name].marshal()) + } + } + + s.Subsections = newSubsections +} + +func (c *Config) marshalSubmodules() { + s := c.Raw.Section(submoduleSection) + s.Subsections = make(format.Subsections, len(c.Submodules)) + + var i int + for _, r := range c.Submodules { + section := r.marshal() + // the submodule section at config is a subset of the .gitmodule file + // we should remove the non-valid options for the config file. + section.RemoveOption(pathKey) + s.Subsections[i] = section + i++ + } +} + +func (c *Config) marshalBranches() { + s := c.Raw.Section(branchSection) + newSubsections := make(format.Subsections, 0, len(c.Branches)) + added := make(map[string]bool) + for _, subsection := range s.Subsections { + if branch, ok := c.Branches[subsection.Name]; ok { + newSubsections = append(newSubsections, branch.marshal()) + added[subsection.Name] = true + } + } + + branchNames := make([]string, 0, len(c.Branches)) + for name := range c.Branches { + branchNames = append(branchNames, name) + } + + sort.Strings(branchNames) + + for _, name := range branchNames { + if !added[name] { + newSubsections = append(newSubsections, c.Branches[name].marshal()) + } + } + + s.Subsections = newSubsections +} + +// RemoteConfig contains the configuration for a given remote repository. +type RemoteConfig struct { + // Name of the remote + Name string + // URLs the URLs of a remote repository. It must be non-empty. Fetch will + // always use the first URL, while push will use all of them. 
+ URLs []string + // Fetch the default set of "refspec" for fetch operation + Fetch []RefSpec + + // raw representation of the subsection, filled by marshal or unmarshal are + // called + raw *format.Subsection +} + +// Validate validates the fields and sets the default values. +func (c *RemoteConfig) Validate() error { + if c.Name == "" { + return ErrRemoteConfigEmptyName + } + + if len(c.URLs) == 0 { + return ErrRemoteConfigEmptyURL + } + + for _, r := range c.Fetch { + if err := r.Validate(); err != nil { + return err + } + } + + if len(c.Fetch) == 0 { + c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} + } + + return nil +} + +func (c *RemoteConfig) unmarshal(s *format.Subsection) error { + c.raw = s + + fetch := []RefSpec{} + for _, f := range c.raw.Options.GetAll(fetchKey) { + rs := RefSpec(f) + if err := rs.Validate(); err != nil { + return err + } + + fetch = append(fetch, rs) + } + + c.Name = c.raw.Name + c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) + c.Fetch = fetch + + return nil +} + +func (c *RemoteConfig) marshal() *format.Subsection { + if c.raw == nil { + c.raw = &format.Subsection{} + } + + c.raw.Name = c.Name + if len(c.URLs) == 0 { + c.raw.RemoveOption(urlKey) + } else { + c.raw.SetOption(urlKey, c.URLs...) + } + + if len(c.Fetch) == 0 { + c.raw.RemoveOption(fetchKey) + } else { + var values []string + for _, rs := range c.Fetch { + values = append(values, rs.String()) + } + + c.raw.SetOption(fetchKey, values...) + } + + return c.raw +} + +func (c *RemoteConfig) IsFirstURLLocal() bool { + return url.IsLocalEndpoint(c.URLs[0]) +} diff --git a/vendor/github.com/go-git/go-git/v5/config/modules.go b/vendor/github.com/go-git/go-git/v5/config/modules.go new file mode 100644 index 0000000000..1c10aa354e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/modules.go @@ -0,0 +1,139 @@ +package config + +import ( + "bytes" + "errors" + "regexp" + + format "github.com/go-git/go-git/v5/plumbing/format/config" +) + +var ( + ErrModuleEmptyURL = errors.New("module config: empty URL") + ErrModuleEmptyPath = errors.New("module config: empty path") + ErrModuleBadPath = errors.New("submodule has an invalid path") +) + +var ( + // Matches module paths with dotdot ".." components. + dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`) +) + +// Modules defines the submodules properties, represents a .gitmodules file +// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html +type Modules struct { + // Submodules is a map of submodules being the key the name of the submodule. + Submodules map[string]*Submodule + + raw *format.Config +} + +// NewModules returns a new empty Modules +func NewModules() *Modules { + return &Modules{ + Submodules: make(map[string]*Submodule), + raw: format.New(), + } +} + +const ( + pathKey = "path" + branchKey = "branch" +) + +// Unmarshal parses a git-config file and stores it. +func (m *Modules) Unmarshal(b []byte) error { + r := bytes.NewBuffer(b) + d := format.NewDecoder(r) + + m.raw = format.New() + if err := d.Decode(m.raw); err != nil { + return err + } + + unmarshalSubmodules(m.raw, m.Submodules) + return nil +} + +// Marshal returns Modules encoded as a git-config file. 
+func (m *Modules) Marshal() ([]byte, error) { + s := m.raw.Section(submoduleSection) + s.Subsections = make(format.Subsections, len(m.Submodules)) + + var i int + for _, r := range m.Submodules { + s.Subsections[i] = r.marshal() + i++ + } + + buf := bytes.NewBuffer(nil) + if err := format.NewEncoder(buf).Encode(m.raw); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Submodule defines a submodule. +type Submodule struct { + // Name module name + Name string + // Path defines the path, relative to the top-level directory of the Git + // working tree. + Path string + // URL defines a URL from which the submodule repository can be cloned. + URL string + // Branch is a remote branch name for tracking updates in the upstream + // submodule. Optional value. + Branch string + + // raw representation of the subsection, filled when marshal or unmarshal + // are called. + raw *format.Subsection +} + +// Validate validates the fields and sets the default values. +func (m *Submodule) Validate() error { + if m.Path == "" { + return ErrModuleEmptyPath + } + + if m.URL == "" { + return ErrModuleEmptyURL + } + + if dotdotPath.MatchString(m.Path) { + return ErrModuleBadPath + } + + return nil +} + +func (m *Submodule) unmarshal(s *format.Subsection) { + m.raw = s + + m.Name = m.raw.Name + m.Path = m.raw.Option(pathKey) + m.URL = m.raw.Option(urlKey) + m.Branch = m.raw.Option(branchKey) +} + +func (m *Submodule) marshal() *format.Subsection { + if m.raw == nil { + m.raw = &format.Subsection{} + } + + m.raw.Name = m.Name + if m.raw.Name == "" { + m.raw.Name = m.Path + } + + m.raw.SetOption(pathKey, m.Path) + m.raw.SetOption(urlKey, m.URL) + + if m.Branch != "" { + m.raw.SetOption(branchKey, m.Branch) + } + + return m.raw +} diff --git a/vendor/github.com/go-git/go-git/v5/config/refspec.go b/vendor/github.com/go-git/go-git/v5/config/refspec.go new file mode 100644 index 0000000000..4bfaa37bbb --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/refspec.go @@ -0,0 +1,155 @@ +package config + +import ( + "errors" + "strings" + + "github.com/go-git/go-git/v5/plumbing" +) + +const ( + refSpecWildcard = "*" + refSpecForce = "+" + refSpecSeparator = ":" +) + +var ( + ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong") + ErrRefSpecMalformedWildcard = errors.New("malformed refspec, mismatched number of wildcards") +) + +// RefSpec is a mapping from local branches to remote references. +// The format of the refspec is an optional +, followed by <src>:<dst>, where +// <src> is the pattern for references on the remote side and <dst> is where +// those references will be written locally. The + tells Git to update the +// reference even if it isn't a fast-forward. +// e.g.: "+refs/heads/*:refs/remotes/origin/*" +// +// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec +type RefSpec string + +// Validate validates the RefSpec +func (s RefSpec) Validate() error { + spec := string(s) + if strings.Count(spec, refSpecSeparator) != 1 { + return ErrRefSpecMalformedSeparator + } + + sep := strings.Index(spec, refSpecSeparator) + if sep == len(spec)-1 { + return ErrRefSpecMalformedSeparator + } + + ws := strings.Count(spec[0:sep], refSpecWildcard) + wd := strings.Count(spec[sep+1:], refSpecWildcard) + if ws == wd && ws < 2 && wd < 2 { + return nil + } + + return ErrRefSpecMalformedWildcard +}
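+ +// Illustrative sketch (placeholder values) of how a RefSpec is typically +// used, relying on the methods defined below: +// +// rs := config.RefSpec("+refs/heads/*:refs/remotes/origin/*") +// rs.IsForceUpdate() // true, because of the leading + +// rs.Src() // "refs/heads/*" +// rs.Match("refs/heads/master") // true +// rs.Dst("refs/heads/master") // "refs/remotes/origin/master"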
+// IsForceUpdate returns whether updates are allowed in non-fast-forward merges. +func (s RefSpec) IsForceUpdate() bool { + return s[0] == refSpecForce[0] +} + +// IsDelete returns true if the refspec indicates a delete (empty src). +func (s RefSpec) IsDelete() bool { + return s[0] == refSpecSeparator[0] +} + +// IsExactSHA1 returns true if the source is a SHA1 hash. +func (s RefSpec) IsExactSHA1() bool { + return plumbing.IsHash(s.Src()) +} + +// Src returns the src side. +func (s RefSpec) Src() string { + spec := string(s) + + var start int + if s.IsForceUpdate() { + start = 1 + } else { + start = 0 + } + + end := strings.Index(spec, refSpecSeparator) + return spec[start:end] +} + +// Match matches the given plumbing.ReferenceName against the source. +func (s RefSpec) Match(n plumbing.ReferenceName) bool { + if !s.IsWildcard() { + return s.matchExact(n) + } + + return s.matchGlob(n) +} + +// IsWildcard returns true if the RefSpec contains a wildcard. +func (s RefSpec) IsWildcard() bool { + return strings.Contains(string(s), refSpecWildcard) +} + +func (s RefSpec) matchExact(n plumbing.ReferenceName) bool { + return s.Src() == n.String() +} + +func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool { + src := s.Src() + name := n.String() + wildcard := strings.Index(src, refSpecWildcard) + + var prefix, suffix string + prefix = src[0:wildcard] + if len(src) > wildcard+1 { + suffix = src[wildcard+1:] + } + + return len(name) >= len(prefix)+len(suffix) && + strings.HasPrefix(name, prefix) && + strings.HasSuffix(name, suffix) +} + +// Dst returns the destination for the given remote reference. +func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName { + spec := string(s) + start := strings.Index(spec, refSpecSeparator) + 1 + dst := spec[start:] + src := s.Src() + + if !s.IsWildcard() { + return plumbing.ReferenceName(dst) + } + + name := n.String() + ws := strings.Index(src, refSpecWildcard) + wd := strings.Index(dst, refSpecWildcard) + match := name[ws : len(name)-(len(src)-(ws+1))] + + return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:]) +} + +func (s RefSpec) Reverse() RefSpec { + spec := string(s) + separator := strings.Index(spec, refSpecSeparator) + + return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator]) +} + +func (s RefSpec) String() string { + return string(s) +} + +// MatchAny returns true if any of the RefSpec match with the given ReferenceName. +func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool { + for _, r := range l { + if r.Match(n) { + return true + } + } + + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/doc.go b/vendor/github.com/go-git/go-git/v5/doc.go new file mode 100644 index 0000000000..3d817fe9c8 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/doc.go @@ -0,0 +1,10 @@ +// A highly extensible git implementation in pure Go. +// +// go-git aims to reach the completeness of libgit2 or jgit; it currently covers +// the majority of the plumbing read operations and some of the main write +// operations, but lacks the main porcelain operations such as merges. +// +// It is highly extensible: we have been following the open/closed principle in +// its design to facilitate extensions, mainly focusing the efforts on the +// persistence of the objects.
+package git diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go new file mode 100644 index 0000000000..8facf17ffb --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go @@ -0,0 +1,622 @@ +// Package revision extracts a git revision from a string. +// More information about revisions: https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html +package revision + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "time" +) + +// ErrInvalidRevision is emitted if a string doesn't match a valid revision +type ErrInvalidRevision struct { + s string +} + +func (e *ErrInvalidRevision) Error() string { + return "Revision invalid : " + e.s +} + +// Revisioner represents a revision component. +// A revision is made of multiple revision components +// obtained after parsing a revision string; +// for instance the revision "master~" will be converted into +// two revision components, Ref and TildePath +type Revisioner interface { +} + +// Ref represents a reference name : HEAD, master, <hash> +type Ref string + +// TildePath represents ~, ~{n} +type TildePath struct { + Depth int +} + +// CaretPath represents ^, ^{n} +type CaretPath struct { + Depth int +} + +// CaretReg represents ^{/foo bar} +type CaretReg struct { + Regexp *regexp.Regexp + Negate bool +} + +// CaretType represents ^{commit} +type CaretType struct { + ObjectType string +} + +// AtReflog represents @{n} +type AtReflog struct { + Depth int +} + +// AtCheckout represents @{-n} +type AtCheckout struct { + Depth int +} + +// AtUpstream represents @{upstream}, @{u} +type AtUpstream struct { + BranchName string +} + +// AtPush represents @{push} +type AtPush struct { + BranchName string +} + +// AtDate represents @{"2006-01-02T15:04:05Z"} +type AtDate struct { + Date time.Time +} + +// ColonReg represents :/foo bar +type ColonReg struct { + Regexp *regexp.Regexp + Negate bool +} + +// ColonPath represents :<path>, :./<path> +type ColonPath struct { + Path string +} + +// ColonStagePath represents :<n>:/<path> +type ColonStagePath struct { + Path string + Stage int +} + +// Parser represents a parser used to tokenize a given string and +// transform it into revisioner chunks +type Parser struct { + s *scanner + currentParsedChar struct { + tok token + lit string + } + unreadLastChar bool +} + +// NewParserFromString returns a new instance of parser from a string. +func NewParserFromString(s string) *Parser { + return NewParser(bytes.NewBufferString(s)) +} + +// NewParser returns a new instance of parser. +func NewParser(r io.Reader) *Parser { + return &Parser{s: newScanner(r)} +} + +// scan returns the next token from the underlying scanner +// or the last scanned token if an unscan was requested +func (p *Parser) scan() (token, string, error) { + if p.unreadLastChar { + p.unreadLastChar = false + return p.currentParsedChar.tok, p.currentParsedChar.lit, nil + } + + tok, lit, err := p.s.scan() + + p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit + + return tok, lit, err +}
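+ +// Illustrative sketch (values are placeholders): parsing "master~2" yields +// two revisioner chunks, roughly +// +// p := NewParserFromString("master~2") +// revs, err := p.Parse() // []Revisioner{Ref("master"), TildePath{Depth: 2}}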
+// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { p.unreadLastChar = true } + +// Parse explodes a revision string into revisioner chunks +func (p *Parser) Parse() ([]Revisioner, error) { + var rev Revisioner + var revs []Revisioner + var tok token + var err error + + for { + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case at: + rev, err = p.parseAt() + case tilde: + rev, err = p.parseTilde() + case caret: + rev, err = p.parseCaret() + case colon: + rev, err = p.parseColon() + case eof: + err = p.validateFullRevision(&revs) + + if err != nil { + return []Revisioner{}, err + } + + return revs, nil + default: + p.unscan() + rev, err = p.parseRef() + } + + if err != nil { + return []Revisioner{}, err + } + + revs = append(revs, rev) + } +} + +// validateFullRevision ensures all revisioner chunks make a valid revision +func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { + var hasReference bool + + for i, chunk := range *chunks { + switch chunk.(type) { + case Ref: + if i == 0 { + hasReference = true + } else { + return &ErrInvalidRevision{`reference must be defined once at the beginning`} + } + case AtDate: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<ISO-8601 date>}, @{<ISO-8601 date>}`} + case AtReflog: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<n>}, @{<n>}`} + case AtCheckout: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`} + case AtUpstream: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`} + case AtPush: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{push}, @{push}`} + case TildePath, CaretPath, CaretReg: + if !hasReference { + return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} + } + case ColonReg: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`} + case ColonPath: + if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : <revision>:<path>`} + case ColonStagePath: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`} + } + } + + return nil +} + +// parseAt extracts @ statements +func (p *Parser) parseAt() (Revisioner, error) { + var tok, nextTok token + var lit, nextLit string + var err error + + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + if tok != obrace { + p.unscan() + + return Ref("HEAD"), nil + } + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, nextLit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: + return AtUpstream{}, nil + case tok == word && lit == "push" && nextTok == cbrace: + return AtPush{}, nil + case tok == number && nextTok == cbrace: + n, _ := strconv.Atoi(lit) + + return AtReflog{n}, nil + case tok == minus && nextTok == number: + n, _ := strconv.Atoi(nextLit) + + t, _, err := p.scan() + + if err != nil { + return nil, err + } + + if t
!= cbrace { + return nil, &ErrInvalidRevision{s: `missing "}" in @{-n} structure`} + } + + return AtCheckout{n}, nil + default: + p.unscan() + + date := lit + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == cbrace: + t, err := time.Parse("2006-01-02T15:04:05Z", date) + + if err != nil { + return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} + } + + return AtDate{t}, nil + default: + date += lit + } + } + } +} + +// parseTilde extract ~ statements +func (p *Parser) parseTilde() (Revisioner, error) { + var tok token + var lit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == number: + n, _ := strconv.Atoi(lit) + + return TildePath{n}, nil + default: + p.unscan() + return TildePath{1}, nil + } +} + +// parseCaret extract ^ statements +func (p *Parser) parseCaret() (Revisioner, error) { + var tok token + var lit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == obrace: + r, err := p.parseCaretBraces() + + if err != nil { + return nil, err + } + + return r, nil + case tok == number: + n, _ := strconv.Atoi(lit) + + if n > 2 { + return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} + } + + return CaretPath{n}, nil + default: + p.unscan() + return CaretPath{1}, nil + } +} + +// parseCaretBraces extract ^{} statements +func (p *Parser) parseCaretBraces() (Revisioner, error) { + var tok, nextTok token + var lit, _ string + start := true + var re string + var negate bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): + return CaretType{lit}, nil + case re == "" && tok == cbrace: + return CaretType{"tag"}, nil + case re == "" && tok == emark && nextTok == emark: + re += lit + case re == "" && tok == emark && nextTok == minus: + negate = true + case re == "" && tok == emark: + return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`} + case re == "" && tok == slash: + p.unscan() + case tok != slash && start: + return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} + case tok != cbrace: + p.unscan() + re += lit + case tok == cbrace: + p.unscan() + + reg, err := regexp.Compile(re) + + if err != nil { + return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} + } + + return CaretReg{reg, negate}, nil + } + + start = false + } +} + +// parseColon extracts : statements +func (p *Parser) parseColon() (Revisioner, error) { + var tok token + var err error + + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case slash: + return p.parseColonSlash() + default: + p.unscan() + return p.parseColonDefault() + } +} + +// parseColonSlash extracts :/ statements +func (p *Parser) parseColonSlash() (Revisioner, error) { + var tok, nextTok token + var lit string + var re string + var negate bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == emark && nextTok == emark: + re += lit + case re == "" && tok == emark && nextTok == minus: + negate = true + case re == "" && tok == emark: + return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`} + case tok == eof: + p.unscan() + reg, err := regexp.Compile(re) + + if err != nil { + return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} + } + + return ColonReg{reg, negate}, nil + default: + p.unscan() + re += lit + } + } +} + +// parseColonDefault extracts : statements +func (p *Parser) parseColonDefault() (Revisioner, error) { + var tok token + var lit string + var path string + var stage int + var err error + var n = -1 + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err := p.scan() + + if err != nil { + return nil, err + } + + if tok == number && nextTok == colon { + n, _ = strconv.Atoi(lit) + } + + switch n { + case 0, 1, 2, 3: + stage = n + default: + path += lit + p.unscan() + } + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == eof && n == -1: + return ColonPath{path}, nil + case tok == eof: + return ColonStagePath{path, stage}, nil + default: + path += lit + } + } +} + +// parseRef extracts a reference name +func (p *Parser) parseRef() (Revisioner, error) { + var tok, prevTok token + var lit, buf string + var endOfRef bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case eof, at, colon, tilde, caret: + endOfRef = true + } + + err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) + + if err != nil { + return "", err + } + + if endOfRef { + p.unscan() + return Ref(buf), nil + } + + buf += lit + prevTok = tok + } +} + +// checkRefFormat ensures a reference name follows the rules defined here: +// https://git-scm.com/docs/git-check-ref-format +func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { + switch token { + case aslash, space, control, qmark, asterisk, obracket: + return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} + } + + switch { + case (token == dot || token == slash) && buffer == "": + return
&ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} + case previousToken == slash && endOfRef: + return &ErrInvalidRevision{`must not end with "/"`} + case previousToken == dot && endOfRef: + return &ErrInvalidRevision{`must not end with "."`} + case token == dot && previousToken == slash: + return &ErrInvalidRevision{`must not contains "/."`} + case previousToken == dot && token == dot: + return &ErrInvalidRevision{`must not contains ".."`} + case previousToken == slash && token == slash: + return &ErrInvalidRevision{`must not contains consecutively "/"`} + case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": + return &ErrInvalidRevision{"cannot end with .lock"} + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go new file mode 100644 index 0000000000..c46c21b795 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go @@ -0,0 +1,117 @@ +package revision + +import ( + "bufio" + "io" + "unicode" +) + +// runeCategoryValidator takes a rune as input and +// validates it belongs to a rune category +type runeCategoryValidator func(r rune) bool + +// tokenizeExpression aggregates a series of runes matching check predicate into a single +// string and provides given tokenType as token type +func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { + var data []rune + data = append(data, ch) + + for { + c, _, err := r.ReadRune() + + if c == zeroRune { + break + } + + if err != nil { + return tokenError, "", err + } + + if check(c) { + data = append(data, c) + } else { + err := r.UnreadRune() + + if err != nil { + return tokenError, "", err + } + + return tokenType, string(data), nil + } + } + + return tokenType, string(data), nil +} + +var zeroRune = rune(0) + +// scanner represents a lexical scanner. +type scanner struct { + r *bufio.Reader +} + +// newScanner returns a new instance of scanner. 
+func newScanner(r io.Reader) *scanner { + return &scanner{r: bufio.NewReader(r)} +} + +// scan extracts tokens and their string counterparts +// from the reader +func (s *scanner) scan() (token, string, error) { + ch, _, err := s.r.ReadRune() + + if err != nil && err != io.EOF { + return tokenError, "", err + } + + switch ch { + case zeroRune: + return eof, "", nil + case ':': + return colon, string(ch), nil + case '~': + return tilde, string(ch), nil + case '^': + return caret, string(ch), nil + case '.': + return dot, string(ch), nil + case '/': + return slash, string(ch), nil + case '{': + return obrace, string(ch), nil + case '}': + return cbrace, string(ch), nil + case '-': + return minus, string(ch), nil + case '@': + return at, string(ch), nil + case '\\': + return aslash, string(ch), nil + case '?': + return qmark, string(ch), nil + case '*': + return asterisk, string(ch), nil + case '[': + return obracket, string(ch), nil + case '!': + return emark, string(ch), nil + } + + if unicode.IsSpace(ch) { + return space, string(ch), nil + } + + if unicode.IsControl(ch) { + return control, string(ch), nil + } + + if unicode.IsLetter(ch) { + return tokenizeExpression(ch, word, unicode.IsLetter, s.r) + } + + if unicode.IsNumber(ch) { + return tokenizeExpression(ch, number, unicode.IsNumber, s.r) + } + + return tokenError, string(ch), nil +} diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/token.go b/vendor/github.com/go-git/go-git/v5/internal/revision/token.go new file mode 100644 index 0000000000..abc4048869 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/token.go @@ -0,0 +1,28 @@ +package revision + +// token represents an entity extracted during string parsing +type token int + +const ( + eof token = iota + + aslash + asterisk + at + caret + cbrace + colon + control + dot + emark + minus + number + obrace + obracket + qmark + slash + space + tilde + tokenError + word +) diff --git a/vendor/github.com/go-git/go-git/v5/internal/url/url.go b/vendor/github.com/go-git/go-git/v5/internal/url/url.go new file mode 100644 index 0000000000..14cf133de8 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/url/url.go @@ -0,0 +1,37 @@ +package url + +import ( + "regexp" +) + +var ( + isSchemeRegExp = regexp.MustCompile(`^[^:]+://`) + scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`) +) + +// MatchesScheme returns true if the given string matches a URL-like +// format scheme. +func MatchesScheme(url string) bool { + return isSchemeRegExp.MatchString(url) +} + +// MatchesScpLike returns true if the given string matches an SCP-like +// format scheme. +func MatchesScpLike(url string) bool { + return scpLikeUrlRegExp.MatchString(url) +} + +// FindScpLikeComponents returns the user, host, port and path of the +// given SCP-like URL. +func FindScpLikeComponents(url string) (user, host, port, path string) { + m := scpLikeUrlRegExp.FindStringSubmatch(url) + return m[1], m[2], m[3], m[4] +} + +// IsLocalEndpoint returns true if the given URL string specifies a +// local file endpoint. For example, on a Linux machine, +// `/home/user/src/go-git` would match as a local endpoint, but +// `https://github.com/src-d/go-git` would not.
+func IsLocalEndpoint(url string) bool { + return !MatchesScheme(url) && !MatchesScpLike(url) +} diff --git a/vendor/github.com/go-git/go-git/v5/object_walker.go b/vendor/github.com/go-git/go-git/v5/object_walker.go new file mode 100644 index 0000000000..3fcdd2999c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/object_walker.go @@ -0,0 +1,104 @@ +package git + +import ( + "fmt" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/storage" +) + +type objectWalker struct { + Storer storage.Storer + // seen is the set of objects seen in the repo. + // seen map can become huge if walking over large + // repos. Thus using struct{} as the value type. + seen map[plumbing.Hash]struct{} +} + +func newObjectWalker(s storage.Storer) *objectWalker { + return &objectWalker{s, map[plumbing.Hash]struct{}{}} +} + +// walkAllRefs walks all (hash) references from the repo. +func (p *objectWalker) walkAllRefs() error { + // Walk over all the references in the repo. + it, err := p.Storer.IterReferences() + if err != nil { + return err + } + defer it.Close() + err = it.ForEach(func(ref *plumbing.Reference) error { + // Exit this iteration early for non-hash references. + if ref.Type() != plumbing.HashReference { + return nil + } + return p.walkObjectTree(ref.Hash()) + }) + return err +} + +func (p *objectWalker) isSeen(hash plumbing.Hash) bool { + _, seen := p.seen[hash] + return seen +} + +func (p *objectWalker) add(hash plumbing.Hash) { + p.seen[hash] = struct{}{} +} + +// walkObjectTree walks over all objects and remembers references +// to them in the objectWalker. This is used instead of the revlist +// walks because memory usage is tight with huge repos. +func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { + // Check if we have already seen, and mark this object + if p.isSeen(hash) { + return nil + } + p.add(hash) + // Fetch the object. + obj, err := object.GetObject(p.Storer, hash) + if err != nil { + return fmt.Errorf("Getting object %s failed: %v", hash, err) + } + // Walk all children depending on object type. + switch obj := obj.(type) { + case *object.Commit: + err = p.walkObjectTree(obj.TreeHash) + if err != nil { + return err + } + for _, h := range obj.ParentHashes { + err = p.walkObjectTree(h) + if err != nil { + return err + } + } + case *object.Tree: + for i := range obj.Entries { + // Shortcut for blob objects: + // 'or' the lower bits of a mode and check that it + // it matches a filemode.Executable. The type information + // is in the higher bits, but this is the cleanest way + // to handle plain files with different modes. + // Other non-tree objects are somewhat rare, so they + // are not special-cased. + if obj.Entries[i].Mode|0755 == filemode.Executable { + p.add(obj.Entries[i].Hash) + continue + } + // Normal walk for sub-trees (and symlinks etc). + err = p.walkObjectTree(obj.Entries[i].Hash) + if err != nil { + return err + } + } + case *object.Tag: + return p.walkObjectTree(obj.Target) + default: + // Error out on unhandled object types. 
+ return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) + } + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go new file mode 100644 index 0000000000..2f9363150c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -0,0 +1,611 @@ +package git + +import ( + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" + "github.com/go-git/go-git/v5/plumbing/transport" + "golang.org/x/crypto/openpgp" +) + +// SubmoduleRescursivity defines how depth will affect any submodule recursive +// operation. +type SubmoduleRescursivity uint + +const ( + // DefaultRemoteName name of the default Remote, just like git command. + DefaultRemoteName = "origin" + + // NoRecurseSubmodules disables the recursion for a submodule operation. + NoRecurseSubmodules SubmoduleRescursivity = 0 + // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. + DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 +) + +var ( + ErrMissingURL = errors.New("URL field is required") +) + +// CloneOptions describes how a clone should be performed. +type CloneOptions struct { + // The (possibly remote) repository URL to clone from. + URL string + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Name of the remote to be added, by default `origin`. + RemoteName string + // Remote branch to clone. + ReferenceName plumbing.ReferenceName + // Fetch only ReferenceName if true. + SingleBranch bool + // No checkout of HEAD after clone if true. + NoCheckout bool + // Limit fetching to the specified number of commits. + Depth int + // RecurseSubmodules after the clone is created, initialize all submodules + // within, using their default settings. This option is ignored if the + // cloned repository does not have a worktree. + RecurseSubmodules SubmoduleRescursivity + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress + // Tags describe how the tags will be fetched from the remote repository, + // by default is AllTags. + Tags TagMode +} + +// Validate validates the fields and sets the default values. +func (o *CloneOptions) Validate() error { + if o.URL == "" { + return ErrMissingURL + } + + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.ReferenceName == "" { + o.ReferenceName = plumbing.HEAD + } + + if o.Tags == InvalidTagMode { + o.Tags = AllTags + } + + return nil +} + +// PullOptions describes how a pull should be performed. +type PullOptions struct { + // Name of the remote to be pulled. If empty, uses the default. + RemoteName string + // Remote branch to clone. If empty, uses HEAD. + ReferenceName plumbing.ReferenceName + // Fetch only ReferenceName if true. + SingleBranch bool + // Limit fetching to the specified number of commits. + Depth int + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // RecurseSubmodules controls if new commits of all populated submodules + // should be fetched too. 
+ RecurseSubmodules SubmoduleRescursivity + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress + // Force allows the pull to update a local branch even when the remote + // branch does not descend from it. + Force bool +} + +// Validate validates the fields and sets the default values. +func (o *PullOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.ReferenceName == "" { + o.ReferenceName = plumbing.HEAD + } + + return nil +} + +type TagMode int + +const ( + InvalidTagMode TagMode = iota + // TagFollowing any tag that points into the histories being fetched is also + // fetched. TagFollowing requires a server with `include-tag` capability + // in order to fetch the annotated tags objects. + TagFollowing + // AllTags fetch all tags from the remote (i.e., fetch remote tags + // refs/tags/* into local tags with the same name) + AllTags + //NoTags fetch no tags from the remote at all + NoTags +) + +// FetchOptions describes how a fetch should be performed +type FetchOptions struct { + // Name of the remote to fetch from. Defaults to origin. + RemoteName string + RefSpecs []config.RefSpec + // Depth limit fetching to the specified number of commits from the tip of + // each remote branch history. + Depth int + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress + // Tags describe how the tags will be fetched from the remote repository, + // by default is TagFollowing. + Tags TagMode + // Force allows the fetch to update a local branch even when the remote + // branch does not descend from it. + Force bool +} + +// Validate validates the fields and sets the default values. +func (o *FetchOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.Tags == InvalidTagMode { + o.Tags = TagFollowing + } + + for _, r := range o.RefSpecs { + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +// PushOptions describes how a push should be performed. +type PushOptions struct { + // RemoteName is the name of the remote to be pushed to. + RemoteName string + // RefSpecs specify what destination ref to update with what source + // object. A refspec with empty src can be used to delete a reference. + RefSpecs []config.RefSpec + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored. + Progress sideband.Progress + // Prune specify that remote refs that match given RefSpecs and that do + // not exist locally will be removed. + Prune bool + // Force allows the push to update a remote branch even when the local + // branch does not descend from it. + Force bool +} + +// Validate validates the fields and sets the default values. 
+func (o *PushOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if len(o.RefSpecs) == 0 { + o.RefSpecs = []config.RefSpec{ + config.RefSpec(config.DefaultPushRefSpec), + } + } + + for _, r := range o.RefSpecs { + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +// SubmoduleUpdateOptions describes how a submodule update should be performed. +type SubmoduleUpdateOptions struct { + // Init, if true, initializes the submodules recorded in the index. + Init bool + // NoFetch tells the update command not to fetch new objects from the + // remote site. + NoFetch bool + // RecurseSubmodules the update is performed not only in the submodules of + // the current repository but also in any nested submodules inside those + // submodules (and so on), until the SubmoduleRescursivity limit is reached. + RecurseSubmodules SubmoduleRescursivity + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod +} + +var ( + ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive") + ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") +) + +// CheckoutOptions describes how a checkout operation should be performed. +type CheckoutOptions struct { + // Hash is the hash of the commit to be checked out. If used, HEAD will be + // in detached mode. If Create is not used, Branch and Hash are mutually + // exclusive. + Hash plumbing.Hash + // Branch to be checked out; if Branch and Hash are empty, it is set to `master`. + Branch plumbing.ReferenceName + // Create a new branch named Branch and start it at Hash. + Create bool + // Force, if true when switching branches, proceed even if the index or the + // working tree differs from HEAD. This is used to throw away local changes. + Force bool + // Keep, if true when switching branches, local changes (the index or the + // working tree changes) will be kept so that they can be committed to the + // target branch. Force and Keep are mutually exclusive and should not both + // be set to true. + Keep bool +} + +// Validate validates the fields and sets the default values. +func (o *CheckoutOptions) Validate() error { + if !o.Create && !o.Hash.IsZero() && o.Branch != "" { + return ErrBranchHashExclusive + } + + if o.Create && o.Branch == "" { + return ErrCreateRequiresBranch + } + + if o.Branch == "" { + o.Branch = plumbing.Master + } + + return nil +}
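+ +// Illustrative sketch (repo is a placeholder *Repository): +// +// w, err := repo.Worktree() +// err = w.Checkout(&CheckoutOptions{ +// Branch: plumbing.NewBranchReferenceName("feature-x"), +// Create: true, +// })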
+// ResetMode defines the mode of a reset operation. +type ResetMode int8 + +const ( + // MixedReset resets the index but not the working tree (i.e., the changed + // files are preserved but not marked for commit) and reports what has not + // been updated. This is the default action. + MixedReset ResetMode = iota + // HardReset resets the index and working tree. Any changes to tracked files + // in the working tree are discarded. + HardReset + // MergeReset resets the index and updates the files in the working tree + // that are different between Commit and HEAD, but keeps those which are + // different between the index and working tree (i.e. which have changes + // which have not been added). + // + // If a file that is different between Commit and the index has unstaged + // changes, reset is aborted. + MergeReset + // SoftReset does not touch the index file or the working tree at all (but + // resets the head to <commit>, just like all modes do). This leaves all + // your changed files "Changes to be committed", as git status would put it. + SoftReset +) + +// ResetOptions describes how a reset operation should be performed. +type ResetOptions struct { + // Commit, if present, sets the current branch head (HEAD) to it. + Commit plumbing.Hash + // Mode determines how the reset of the current branch head to Commit + // affects the index (resetting it to the tree of Commit) and the working + // tree. If empty, MixedReset is used. + Mode ResetMode +} + +// Validate validates the fields and sets the default values. +func (o *ResetOptions) Validate(r *Repository) error { + if o.Commit == plumbing.ZeroHash { + ref, err := r.Head() + if err != nil { + return err + } + + o.Commit = ref.Hash() + } + + return nil +} + +type LogOrder int8 + +const ( + LogOrderDefault LogOrder = iota + LogOrderDFS + LogOrderDFSPost + LogOrderBSF + LogOrderCommitterTime +) + +// LogOptions describes how a log action should be performed. +type LogOptions struct { + // When the From option is set the log will only contain commits + // reachable from it. If this option is not set, HEAD will be used as + // the default From. + From plumbing.Hash + + // The default traversal algorithm is depth-first search. + // Set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`). + // Set Order=LogOrderBSF for breadth-first search. + Order LogOrder + + // Show only those commits in which the specified file was inserted/updated. + // It is equivalent to running `git log -- <file-name>`. + // This field is kept for compatibility; it can be replaced with PathFilter. + FileName *string + + // Filter commits based on the path of files that are updated. + // Takes a file path as argument and should return true if the file is desired. + // It can be used to implement `git log -- <path>`, + // where <path> is either a file path, a directory path, or a regexp of a file/directory path. + PathFilter func(string) bool + + // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>. + // It is equivalent to running `git log --all`. + // If set to true, the From option will be ignored. + All bool + + // Show commits more recent than a specific date. + // It is equivalent to running `git log --since <date>` or `git log --after <date>`. + Since *time.Time + + // Show commits older than a specific date. + // It is equivalent to running `git log --until <date>` or `git log --before <date>`. + Until *time.Time +} + +var ( + ErrMissingAuthor = errors.New("author field is required") +) + +// AddOptions describes how an add operation should be performed +type AddOptions struct { + // All is equivalent to `git add -A`: update the index not only where the + // working tree has a file matching `Path` but also where the index already + // has an entry. This adds, modifies, and removes index entries to match the + // working tree. If no `Path` nor `Glob` is given when `All` option is + // used, all files in the entire working tree are updated. + All bool + // Path is the exact filepath to the file or directory to be added. + Path string + // Glob adds all paths, matching pattern, to the index. If pattern matches a + // directory path, all directory contents are added to the index recursively. + Glob string +} + +// Validate validates the fields and sets the default values. +func (o *AddOptions) Validate(r *Repository) error { + if o.Path != "" && o.Glob != "" { + return fmt.Errorf("fields Path and Glob are mutually exclusive") + } + + return nil +}
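+ +// Illustrative sketch (repo is a placeholder *Repository): +// +// since := time.Now().AddDate(0, 0, -7) // commits from the last week +// iter, err := repo.Log(&LogOptions{All: true, Since: &since}) +// err = iter.ForEach(func(c *object.Commit) error { +// fmt.Println(c.Hash, c.Author.When) +// return nil +// })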
+
+// CommitOptions describes how a commit operation should be performed.
+type CommitOptions struct {
+	// All automatically stages files that have been modified and deleted;
+	// new files you have not told Git about are not affected.
+	All bool
+	// Author is the author's signature of the commit. If Author is empty,
+	// the Name and Email are read from the config, and time.Now is used as
+	// When.
+	Author *object.Signature
+	// Committer is the committer's signature of the commit. If Committer is
+	// nil the Author signature is used.
+	Committer *object.Signature
+	// Parents are the parent commits for the new commit. By default, when
+	// len(Parents) is zero, the hash of the HEAD reference is used.
+	Parents []plumbing.Hash
+	// SignKey denotes a key to sign the commit with. A nil value here means the
+	// commit will not be signed. The private key must be present and already
+	// decrypted.
+	SignKey *openpgp.Entity
+}
+
+// Validate validates the fields and sets the default values.
+func (o *CommitOptions) Validate(r *Repository) error {
+	if o.Author == nil {
+		if err := o.loadConfigAuthorAndCommitter(r); err != nil {
+			return err
+		}
+	}
+
+	if o.Committer == nil {
+		o.Committer = o.Author
+	}
+
+	if len(o.Parents) == 0 {
+		head, err := r.Head()
+		if err != nil && err != plumbing.ErrReferenceNotFound {
+			return err
+		}
+
+		if head != nil {
+			o.Parents = []plumbing.Hash{head.Hash()}
+		}
+	}
+
+	return nil
+}
+
+func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error {
+	cfg, err := r.ConfigScoped(config.SystemScope)
+	if err != nil {
+		return err
+	}
+
+	if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
+		o.Author = &object.Signature{
+			Name:  cfg.Author.Name,
+			Email: cfg.Author.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" {
+		o.Committer = &object.Signature{
+			Name:  cfg.Committer.Name,
+			Email: cfg.Committer.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" {
+		o.Author = &object.Signature{
+			Name:  cfg.User.Name,
+			Email: cfg.User.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Author == nil {
+		return ErrMissingAuthor
+	}
+
+	return nil
+}
+
+var (
+	ErrMissingName    = errors.New("name field is required")
+	ErrMissingTagger  = errors.New("tagger field is required")
+	ErrMissingMessage = errors.New("message field is required")
+)
+
+// CreateTagOptions describes how a tag object should be created.
+type CreateTagOptions struct {
+	// Tagger defines the signature of the tag creator. If Tagger is empty,
+	// the Name and Email are read from the config, and time.Now is used as
+	// When.
+	Tagger *object.Signature
+	// Message defines the annotation of the tag. It is canonicalized during
+	// validation into the format expected by git - no leading whitespace and
+	// ending in a newline.
+	Message string
+	// SignKey denotes a key to sign the tag with. A nil value here means the tag
+	// will not be signed. The private key must be present and already decrypted.
+	SignKey *openpgp.Entity
+}
+
+// Validate validates the fields and sets the default values.
+func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
+	if o.Tagger == nil {
+		if err := o.loadConfigTagger(r); err != nil {
+			return err
+		}
+	}
+
+	if o.Message == "" {
+		return ErrMissingMessage
+	}
+
+	// Canonicalize the message into the expected message format.
+	o.Message = strings.TrimSpace(o.Message) + "\n"
+
+	return nil
+}
+
+func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
+	cfg, err := r.ConfigScoped(config.SystemScope)
+	if err != nil {
+		return err
+	}
+
+	if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
+		o.Tagger = &object.Signature{
+			Name:  cfg.Author.Name,
+			Email: cfg.Author.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" {
+		o.Tagger = &object.Signature{
+			Name:  cfg.User.Name,
+			Email: cfg.User.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Tagger == nil {
+		return ErrMissingTagger
+	}
+
+	return nil
+}
+
+// ListOptions describes how a remote list should be performed.
+type ListOptions struct {
+	// Auth credentials, if required, to use with the remote repository.
+	Auth transport.AuthMethod
+}
+
+// CleanOptions describes how a clean should be performed.
+type CleanOptions struct {
+	Dir bool
+}
+
+// GrepOptions describes how a grep should be performed.
+type GrepOptions struct {
+	// Patterns are compiled Regexp objects to be matched.
+	Patterns []*regexp.Regexp
+	// InvertMatch selects non-matching lines.
+	InvertMatch bool
+	// CommitHash is the hash of the commit from which worktree should be derived.
+	CommitHash plumbing.Hash
+	// ReferenceName is the branch or tag name from which worktree should be derived.
+	ReferenceName plumbing.ReferenceName
+	// PathSpecs are compiled Regexp objects of pathspec to use in the matching.
+	PathSpecs []*regexp.Regexp
+}
+
+var (
+	ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed")
+)
+
+// Validate validates the fields and sets the default values.
+func (o *GrepOptions) Validate(w *Worktree) error {
+	if !o.CommitHash.IsZero() && o.ReferenceName != "" {
+		return ErrHashOrReference
+	}
+
+	// If neither CommitHash nor ReferenceName is provided, use the commit
+	// hash of the repository's HEAD.
+	if o.CommitHash.IsZero() && o.ReferenceName == "" {
+		ref, err := w.r.Head()
+		if err != nil {
+			return err
+		}
+		o.CommitHash = ref.Hash()
+	}
+
+	return nil
+}
+
+// PlainOpenOptions describes how opening a plain repository should be
+// performed.
+type PlainOpenOptions struct {
+	// DetectDotGit defines whether parent directories should be
+	// walked until a .git directory or file is found.
+	DetectDotGit bool
+	// Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt).
+	// NOTE: This option will only work with the filesystem storage.
+	EnableDotGitCommonDir bool
+}
+
+// Validate validates the fields and sets the default values.
+func (o *PlainOpenOptions) Validate() error { return nil }
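// Illustrative sketch (not part of the vendored go-git code): GrepOptions as
// validated above. Leaving both CommitHash and ReferenceName unset greps at
// HEAD; setting both returns ErrHashOrReference. The "." path and the
// GrepResult field names are assumptions; errors are ignored for brevity.
package main

import (
	"fmt"
	"regexp"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, _ := git.PlainOpen(".")
	w, _ := repo.Worktree()

	// Roughly `git grep TODO` at HEAD.
	results, _ := w.Grep(&git.GrepOptions{
		Patterns: []*regexp.Regexp{regexp.MustCompile(`TODO`)},
	})
	for _, r := range results {
		fmt.Printf("%s:%d: %s\n", r.FileName, r.LineNumber, r.Content)
	}
}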
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
new file mode 100644
index 0000000000..acaf195203
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
@@ -0,0 +1,98 @@
+package cache
+
+import (
+	"container/list"
+	"sync"
+)
+
+// BufferLRU implements an object cache with an LRU eviction policy and a
+// maximum size (measured in object size).
+type BufferLRU struct {
+	MaxSize FileSize
+
+	actualSize FileSize
+	ll         *list.List
+	cache      map[int64]*list.Element
+	mut        sync.Mutex
+}
+
+// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
+// size will never be exceeded.
+func NewBufferLRU(maxSize FileSize) *BufferLRU {
+	return &BufferLRU{MaxSize: maxSize}
+}
+
+// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
+func NewBufferLRUDefault() *BufferLRU {
+	return &BufferLRU{MaxSize: DefaultMaxSize}
+}
+
+type buffer struct {
+	Key   int64
+	Slice []byte
+}
+
+// Put puts a buffer into the cache. If the buffer is already in the cache, it
+// will be marked as used. Otherwise, it will be inserted. Buffers might
+// be evicted to make room for the new one.
+func (c *BufferLRU) Put(key int64, slice []byte) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	if c.cache == nil {
+		c.actualSize = 0
+		c.cache = make(map[int64]*list.Element, 1000)
+		c.ll = list.New()
+	}
+
+	bufSize := FileSize(len(slice))
+	if ee, ok := c.cache[key]; ok {
+		oldBuf := ee.Value.(buffer)
+		// in this case bufSize is a delta: new size - old size
+		bufSize -= FileSize(len(oldBuf.Slice))
+		c.ll.MoveToFront(ee)
+		ee.Value = buffer{key, slice}
+	} else {
+		if bufSize > c.MaxSize {
+			return
+		}
+		ee := c.ll.PushFront(buffer{key, slice})
+		c.cache[key] = ee
+	}
+
+	c.actualSize += bufSize
+	for c.actualSize > c.MaxSize {
+		last := c.ll.Back()
+		lastObj := last.Value.(buffer)
+		lastSize := FileSize(len(lastObj.Slice))
+
+		c.ll.Remove(last)
+		delete(c.cache, lastObj.Key)
+		c.actualSize -= lastSize
+	}
+}
+
+// Get returns a buffer by its key. It marks the buffer as used. If the buffer
+// is not in the cache, (nil, false) will be returned.
+func (c *BufferLRU) Get(key int64) ([]byte, bool) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	ee, ok := c.cache[key]
+	if !ok {
+		return nil, false
+	}
+
+	c.ll.MoveToFront(ee)
+	return ee.Value.(buffer).Slice, true
+}
+
+// Clear the content of this buffer cache.
+func (c *BufferLRU) Clear() {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	c.ll = nil
+	c.cache = nil
+	c.actualSize = 0
+}
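// Illustrative sketch (not part of the vendored go-git code): the eviction
// loop in BufferLRU.Put above. With a 10-byte budget, inserting a second
// 6-byte buffer pushes the total to 12, so the least recently used entry
// (key 1) is evicted.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/cache"
)

func main() {
	c := cache.NewBufferLRU(10 * cache.Byte)
	c.Put(1, []byte("aaaaaa"))
	c.Put(2, []byte("bbbbbb"))

	_, ok := c.Get(1)
	fmt.Println(ok) // false: evicted
	_, ok = c.Get(2)
	fmt.Println(ok) // true
}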
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
new file mode 100644
index 0000000000..7b0d0c76bb
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
@@ -0,0 +1,39 @@
+package cache
+
+import "github.com/go-git/go-git/v5/plumbing"
+
+const (
+	Byte FileSize = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+)
+
+type FileSize int64
+
+const DefaultMaxSize FileSize = 96 * MiByte
+
+// Object is an interface to an object cache.
+type Object interface {
+	// Put puts the given object into the cache. Whether this object will
+	// actually be put into the cache or not is implementation specific.
+	Put(o plumbing.EncodedObject)
+	// Get gets an object from the cache given its hash. The second return value
+	// is true if the object was returned, and false otherwise.
+	Get(k plumbing.Hash) (plumbing.EncodedObject, bool)
+	// Clear clears every object from the cache.
+	Clear()
+}
+
+// Buffer is an interface to a buffer cache.
+type Buffer interface {
+	// Put puts a buffer into the cache. If the buffer is already in the cache,
+	// it will be marked as used. Otherwise, it will be inserted. Buffers might
+	// be evicted to make room for the new one.
+	Put(key int64, slice []byte)
+	// Get returns a buffer by its key. It marks the buffer as used. If the
+	// buffer is not in the cache, (nil, false) will be returned.
+	Get(key int64) ([]byte, bool)
+	// Clear clears every object from the cache.
+	Clear()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go
new file mode 100644
index 0000000000..c50d0d1e6c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go
@@ -0,0 +1,101 @@
+package cache
+
+import (
+	"container/list"
+	"sync"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+// ObjectLRU implements an object cache with an LRU eviction policy and a
+// maximum size (measured in object size).
+type ObjectLRU struct {
+	MaxSize FileSize
+
+	actualSize FileSize
+	ll         *list.List
+	cache      map[interface{}]*list.Element
+	mut        sync.Mutex
+}
+
+// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum
+// size will never be exceeded.
+func NewObjectLRU(maxSize FileSize) *ObjectLRU {
+	return &ObjectLRU{MaxSize: maxSize}
+}
+
+// NewObjectLRUDefault creates a new ObjectLRU with the default cache size.
+func NewObjectLRUDefault() *ObjectLRU {
+	return &ObjectLRU{MaxSize: DefaultMaxSize}
+}
+
+// Put puts an object into the cache. If the object is already in the cache, it
+// will be marked as used. Otherwise, it will be inserted. A single object might
+// be evicted to make room for the new object.
+func (c *ObjectLRU) Put(obj plumbing.EncodedObject) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	if c.cache == nil {
+		c.actualSize = 0
+		c.cache = make(map[interface{}]*list.Element, 1000)
+		c.ll = list.New()
+	}
+
+	objSize := FileSize(obj.Size())
+	key := obj.Hash()
+	if ee, ok := c.cache[key]; ok {
+		oldObj := ee.Value.(plumbing.EncodedObject)
+		// in this case objSize is a delta: new size - old size
+		objSize -= FileSize(oldObj.Size())
+		c.ll.MoveToFront(ee)
+		ee.Value = obj
+	} else {
+		if objSize > c.MaxSize {
+			return
+		}
+		ee := c.ll.PushFront(obj)
+		c.cache[key] = ee
+	}
+
+	c.actualSize += objSize
+	for c.actualSize > c.MaxSize {
+		last := c.ll.Back()
+		if last == nil {
+			c.actualSize = 0
+			break
+		}
+
+		lastObj := last.Value.(plumbing.EncodedObject)
+		lastSize := FileSize(lastObj.Size())
+
+		c.ll.Remove(last)
+		delete(c.cache, lastObj.Hash())
+		c.actualSize -= lastSize
+	}
+}
+
+// Get returns an object by its hash. It marks the object as used. If the object
+// is not in the cache, (nil, false) will be returned.
+func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	ee, ok := c.cache[k]
+	if !ok {
+		return nil, false
+	}
+
+	c.ll.MoveToFront(ee)
+	return ee.Value.(plumbing.EncodedObject), true
+}
+
+// Clear the content of this object cache.
+func (c *ObjectLRU) Clear() {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	c.ll = nil
+	c.cache = nil
+	c.actualSize = 0
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go b/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go
new file mode 100644
index 0000000000..2cd74bdc1a
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go
@@ -0,0 +1,38 @@
+package color
+
+// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct
+// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c
+
+// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53.
+const (
+	Normal       = ""
+	Reset        = "\033[m"
+	Bold         = "\033[1m"
+	Red          = "\033[31m"
+	Green        = "\033[32m"
+	Yellow       = "\033[33m"
+	Blue         = "\033[34m"
+	Magenta      = "\033[35m"
+	Cyan         = "\033[36m"
+	BoldRed      = "\033[1;31m"
+	BoldGreen    = "\033[1;32m"
+	BoldYellow   = "\033[1;33m"
+	BoldBlue     = "\033[1;34m"
+	BoldMagenta  = "\033[1;35m"
+	BoldCyan     = "\033[1;36m"
+	FaintRed     = "\033[2;31m"
+	FaintGreen   = "\033[2;32m"
+	FaintYellow  = "\033[2;33m"
+	FaintBlue    = "\033[2;34m"
+	FaintMagenta = "\033[2;35m"
+	FaintCyan    = "\033[2;36m"
+	BgRed        = "\033[41m"
+	BgGreen      = "\033[42m"
+	BgYellow     = "\033[43m"
+	BgBlue       = "\033[44m"
+	BgMagenta    = "\033[45m"
+	BgCyan       = "\033[46m"
+	Faint        = "\033[2m"
+	FaintItalic  = "\033[2;3m"
+	Reverse      = "\033[7m"
+)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/error.go
new file mode 100644
index 0000000000..a3ebed3f6c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/error.go
@@ -0,0 +1,35 @@
+package plumbing
+
+import "fmt"
+
+type PermanentError struct {
+	Err error
+}
+
+func NewPermanentError(err error) *PermanentError {
+	if err == nil {
+		return nil
+	}
+
+	return &PermanentError{Err: err}
+}
+
+func (e *PermanentError) Error() string {
+	return fmt.Sprintf("permanent client error: %s", e.Err.Error())
+}
+
+type UnexpectedError struct {
+	Err error
+}
+
+func NewUnexpectedError(err error) *UnexpectedError {
+	if err == nil {
+		return nil
+	}
+
+	return &UnexpectedError{Err: err}
+}
+
+func (e *UnexpectedError) Error() string {
+	return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
+}
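// Illustrative sketch (not part of the vendored go-git code): the error
// wrappers above simply prefix the underlying error message.
package main

import (
	"errors"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	err := plumbing.NewPermanentError(errors.New("repository not found"))
	fmt.Println(err) // permanent client error: repository not found

	fmt.Println(plumbing.NewUnexpectedError(errors.New("short read")))
	// unexpected client error: short read
}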
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
new file mode 100644
index 0000000000..b848a97961
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
@@ -0,0 +1,188 @@
+package filemode
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+	"strconv"
+)
+
+// A FileMode represents the kind of tree entries used by git. It
+// resembles regular file systems modes, although FileModes are
+// considerably simpler (there are not so many), and there are some,
+// like Submodule, which has no file system equivalent.
+type FileMode uint32
+
+const (
+	// Empty is used as the FileMode of tree elements when comparing
+	// trees in the following situations:
+	//
+	// - the mode of tree elements before their creation.
+	// - the mode of tree elements after their deletion.
+	// - the mode of unmerged elements when checking the index.
+	//
+	// Empty has no file system equivalent. As Empty is the zero value
+	// of FileMode, it is also returned by New and NewFromOSFileMode,
+	// along with an error, when they fail.
+	Empty FileMode = 0
+	// Dir represents a directory.
+	Dir FileMode = 0040000
+	// Regular represents non-executable files. Please note this is not
+	// the same as golang regular files, which include executable files.
+	Regular FileMode = 0100644
+	// Deprecated represents non-executable files with the group writable
+	// bit set. This mode was supported by the first versions of git,
+	// but it is deprecated nowadays. This library uses it internally,
+	// so you can read old packfiles, but such entries are treated as
+	// Regular when interfacing with the outside world. This is the
+	// standard git behaviour.
+	Deprecated FileMode = 0100664
+	// Executable represents executable files.
+	Executable FileMode = 0100755
+	// Symlink represents symbolic links to files.
+	Symlink FileMode = 0120000
+	// Submodule represents git submodules. This mode has no file system
+	// equivalent.
+	Submodule FileMode = 0160000
+)
+
+// New takes the octal string representation of a FileMode and returns
+// the FileMode and a nil error. If the string cannot be parsed to a
+// 32 bit unsigned octal number, it returns Empty and the parsing error.
+//
+// Example: "40000" means Dir, "100644" means Regular.
+//
+// Please note this function does not check if the returned FileMode
+// is valid in git or if it is malformed. For instance, "1" will
+// return the malformed FileMode(1) and a nil error.
+func New(s string) (FileMode, error) {
+	n, err := strconv.ParseUint(s, 8, 32)
+	if err != nil {
+		return Empty, err
+	}
+
+	return FileMode(n), nil
+}
+
+// NewFromOSFileMode returns the FileMode used by git to represent
+// the provided file system modes and a nil error on success. If the
+// file system mode cannot be mapped to any valid git mode (as with
+// sockets or named pipes), it will return Empty and an error.
+//
+// Note that some git modes cannot be generated from os.FileModes, like
+// Deprecated and Submodule; while Empty will be returned, along with an
+// error, only when the method fails.
+func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
+	if m.IsRegular() {
+		if isSetTemporary(m) {
+			return Empty, fmt.Errorf("no equivalent git mode for %s", m)
+		}
+		if isSetCharDevice(m) {
+			return Empty, fmt.Errorf("no equivalent git mode for %s", m)
+		}
+		if isSetUserExecutable(m) {
+			return Executable, nil
+		}
+		return Regular, nil
+	}
+
+	if m.IsDir() {
+		return Dir, nil
+	}
+
+	if isSetSymLink(m) {
+		return Symlink, nil
+	}
+
+	return Empty, fmt.Errorf("no equivalent git mode for %s", m)
+}
+
+func isSetCharDevice(m os.FileMode) bool {
+	return m&os.ModeCharDevice != 0
+}
+
+func isSetTemporary(m os.FileMode) bool {
+	return m&os.ModeTemporary != 0
+}
+
+func isSetUserExecutable(m os.FileMode) bool {
+	return m&0100 != 0
+}
+
+func isSetSymLink(m os.FileMode) bool {
+	return m&os.ModeSymlink != 0
+}
+
+// Bytes returns a slice of 4 bytes with the mode in little endian
+// encoding.
+func (m FileMode) Bytes() []byte {
+	ret := make([]byte, 4)
+	binary.LittleEndian.PutUint32(ret, uint32(m))
+	return ret
+}
+
+// IsMalformed returns whether the FileMode should not appear in a git
+// packfile; that is: Empty and any other mode not mentioned as a constant
+// in this package.
+func (m FileMode) IsMalformed() bool {
+	return m != Dir &&
+		m != Regular &&
+		m != Deprecated &&
+		m != Executable &&
+		m != Symlink &&
+		m != Submodule
+}
+
+// String returns the FileMode as a string in the standard git format;
+// that is, an octal number padded with zeros to 7 digits. Malformed
+// modes are printed in that same format, for easier debugging.
+//
+// Example: Regular is "0100644", Empty is "0000000".
+func (m FileMode) String() string {
+	return fmt.Sprintf("%07o", uint32(m))
+}
+
+// IsRegular returns whether the FileMode represents that of a regular
+// file; that is, either Regular or Deprecated. Please note that Executable
+// files are not regular, even though in the UNIX tradition they usually
+// are; see the IsFile method.
+func (m FileMode) IsRegular() bool {
+	return m == Regular ||
+		m == Deprecated
}
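// Illustrative sketch (not part of the vendored go-git code): New above
// parses the octal tree-entry notation, and String prints it back
// zero-padded to seven digits.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/filemode"
)

func main() {
	m, err := filemode.New("100644")
	fmt.Println(m, err)                // 0100644 <nil>
	fmt.Println(m == filemode.Regular) // true
	fmt.Println(m.IsRegular())         // true

	// Unparsable input yields Empty and the strconv error.
	_, err = filemode.New("not-octal")
	fmt.Println(err != nil) // true
}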
+
+// IsFile returns whether the FileMode represents that of a file; that is,
+// Regular, Deprecated, Executable or Symlink.
+func (m FileMode) IsFile() bool {
+	return m == Regular ||
+		m == Deprecated ||
+		m == Executable ||
+		m == Symlink
+}
+
+// ToOSFileMode returns the os.FileMode to be used when creating file
+// system elements with the given git mode and a nil error on success.
+//
+// When the provided mode cannot be mapped to a valid file system mode
+// (e.g. Submodule) it returns os.FileMode(0) and an error.
+//
+// The returned file mode does not take into account the umask.
+func (m FileMode) ToOSFileMode() (os.FileMode, error) {
+	switch m {
+	case Dir:
+		return os.ModePerm | os.ModeDir, nil
+	case Submodule:
+		return os.ModePerm | os.ModeDir, nil
+	case Regular:
+		return os.FileMode(0644), nil
+	// Deprecated is no longer allowed: treated as a Regular instead
+	case Deprecated:
+		return os.FileMode(0644), nil
+	case Executable:
+		return os.FileMode(0755), nil
+	case Symlink:
+		return os.ModePerm | os.ModeSymlink, nil
+	}
+
+	return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
new file mode 100644
index 0000000000..6d689ea1e0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
@@ -0,0 +1,109 @@
+package config
+
+// New creates a new config instance.
+func New() *Config {
+	return &Config{}
+}
+
+// Config contains all the sections, comments and includes from a config file.
+type Config struct {
+	Comment  *Comment
+	Sections Sections
+	Includes Includes
+}
+
+// Includes is a list of Includes in a config file.
+type Includes []*Include
+
+// Include is a reference to an included config file.
+type Include struct {
+	Path   string
+	Config *Config
+}
+
+// Comment is a comment string, without the prefix '#' or ';'.
+type Comment string
+
+const (
+	// NoSubsection token is passed to Config.Section and Config.SetSection to
+	// represent the absence of a section.
+	NoSubsection = ""
+)
+
+// Section returns an existing section with the given name or creates a new one.
+func (c *Config) Section(name string) *Section {
+	for i := len(c.Sections) - 1; i >= 0; i-- {
+		s := c.Sections[i]
+		if s.IsName(name) {
+			return s
+		}
+	}
+
+	s := &Section{Name: name}
+	c.Sections = append(c.Sections, s)
+	return s
+}
+
+// HasSection checks if the Config has a section with the specified name.
+func (c *Config) HasSection(name string) bool {
+	for _, s := range c.Sections {
+		if s.IsName(name) {
+			return true
+		}
+	}
+	return false
+}
+
+// RemoveSection removes a section from a config file.
+func (c *Config) RemoveSection(name string) *Config {
+	result := Sections{}
+	for _, s := range c.Sections {
+		if !s.IsName(name) {
+			result = append(result, s)
+		}
+	}
+
+	c.Sections = result
+	return c
+}
+
+// RemoveSubsection removes a subsection from a config file.
+func (c *Config) RemoveSubsection(section string, subsection string) *Config {
+	for _, s := range c.Sections {
+		if s.IsName(section) {
+			result := Subsections{}
+			for _, ss := range s.Subsections {
+				if !ss.IsName(subsection) {
+					result = append(result, ss)
+				}
+			}
+			s.Subsections = result
+		}
+	}
+
+	return c
+}
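// Illustrative sketch (not part of the vendored go-git code): building a
// config in memory with the Section API above and printing it with the
// Encoder that this same patch vendors further below.
package main

import (
	"os"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := config.New()
	cfg.Section("core").SetOption("filemode", "false")
	cfg.Section("remote").Subsection("origin").
		AddOption("url", "https://github.com/go-git/go-git")

	// Prints a [core] section with its option, then [remote "origin"].
	_ = config.NewEncoder(os.Stdout).Encode(cfg)
}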
+
+// AddOption adds an option to a given section and subsection. Use the
+// NoSubsection constant for the subsection argument if no subsection is wanted.
+func (c *Config) AddOption(section string, subsection string, key string, value string) *Config {
+	if subsection == "" {
+		c.Section(section).AddOption(key, value)
+	} else {
+		c.Section(section).Subsection(subsection).AddOption(key, value)
+	}
+
+	return c
+}
+
+// SetOption sets an option to a given section and subsection. Use the
+// NoSubsection constant for the subsection argument if no subsection is wanted.
+func (c *Config) SetOption(section string, subsection string, key string, value string) *Config {
+	if subsection == "" {
+		c.Section(section).SetOption(key, value)
+	} else {
+		c.Section(section).Subsection(subsection).SetOption(key, value)
+	}
+
+	return c
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go
new file mode 100644
index 0000000000..8e52d57f30
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go
@@ -0,0 +1,37 @@
+package config
+
+import (
+	"io"
+
+	"github.com/go-git/gcfg"
+)
+
+// A Decoder reads and decodes config files from an input stream.
+type Decoder struct {
+	io.Reader
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r}
+}
+
+// Decode reads the whole config from its input and stores it in the
+// value pointed to by config.
+func (d *Decoder) Decode(config *Config) error {
+	cb := func(s string, ss string, k string, v string, bv bool) error {
+		if ss == "" && k == "" {
+			config.Section(s)
+			return nil
+		}
+
+		if ss != "" && k == "" {
+			config.Section(s).Subsection(ss)
+			return nil
+		}
+
+		config.AddOption(s, ss, k, v)
+		return nil
+	}
+	return gcfg.ReadWithCallback(d, cb)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go
new file mode 100644
index 0000000000..3986c83658
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go
@@ -0,0 +1,122 @@
+// Package config implements encoding and decoding of git config files.
+//
+// Configuration File
+// ------------------
+//
+// The Git configuration file contains a number of variables that affect
+// the Git commands' behavior. The `.git/config` file in each repository
+// is used to store the configuration for that repository, and
+// `$HOME/.gitconfig` is used to store a per-user configuration as
+// fallback values for the `.git/config` file. The file `/etc/gitconfig`
+// can be used to store a system-wide default configuration.
+//
+// The configuration variables are used by both the Git plumbing
+// and the porcelains. The variables are divided into sections, wherein
+// the fully qualified variable name of the variable itself is the last
+// dot-separated segment and the section name is everything before the last
+// dot. The variable names are case-insensitive, allow only alphanumeric
+// characters and `-`, and must start with an alphabetic character. Some
+// variables may appear multiple times; we say then that the variable is
+// multivalued.
+//
+// Syntax
+// ~~~~~~
+//
+// The syntax is fairly flexible and permissive; whitespaces are mostly
+// ignored. The '#' and ';' characters begin comments to the end of line,
+// blank lines are ignored.
+//
+// The file consists of sections and variables. A section begins with
+// the name of the section in square brackets and continues until the next
+// section begins. Section names are case-insensitive. Only alphanumeric
+// characters, `-` and `.` are allowed in section names. Each variable
+// must belong to some section, which means that there must be a section
+// header before the first setting of a variable.
+//
+// Sections can be further divided into subsections. To begin a subsection
+// put its name in double quotes, separated by space from the section name,
+// in the section header, like in the example below:
+//
+// --------
+//	[section "subsection"]
+//
+// --------
+//
+// Subsection names are case sensitive and can contain any characters except
+// newline (doublequote `"` and backslash can be included by escaping them
+// as `\"` and `\\`, respectively). Section headers cannot span multiple
+// lines. Variables may belong directly to a section or to a given subsection.
+// You can have `[section]` if you have `[section "subsection"]`, but you
+// don't need to.
+//
+// There is also a deprecated `[section.subsection]` syntax. With this
+// syntax, the subsection name is converted to lower-case and is also
+// compared case sensitively. These subsection names follow the same
+// restrictions as section names.
+//
+// All the other lines (and the remainder of the line after the section
+// header) are recognized as setting variables, in the form
+// 'name = value' (or just 'name', which is a short-hand to say that
+// the variable is the boolean "true").
+// The variable names are case-insensitive, allow only alphanumeric characters
+// and `-`, and must start with an alphabetic character.
+//
+// A line that defines a value can be continued to the next line by
+// ending it with a `\`; the backslash and the end-of-line are
+// stripped. Leading whitespaces after 'name =', the remainder of the
+// line after the first comment character '#' or ';', and trailing
+// whitespaces of the line are discarded unless they are enclosed in
+// double quotes. Internal whitespaces within the value are retained
+// verbatim.
+//
+// Inside double quotes, double quote `"` and backslash `\` characters
+// must be escaped: use `\"` for `"` and `\\` for `\`.
+//
+// The following escape sequences (beside `\"` and `\\`) are recognized:
+// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
+// and `\b` for backspace (BS). Other char escape sequences (including octal
+// escape sequences) are invalid.
+//
+// Includes
+// ~~~~~~~~
+//
+// You can include one config file from another by setting the special
+// `include.path` variable to the name of the file to be included. The
+// variable takes a pathname as its value, and is subject to tilde
+// expansion.
+//
+// The included file is expanded immediately, as if its contents had been
+// found at the location of the include directive. If the value of the
+// `include.path` variable is a relative path, the path is considered to be
+// relative to the configuration file in which the include directive was
+// found. See below for examples.
+//
+//
+// Example
+// ~~~~~~~
+//
+//	# Core variables
+//	[core]
+//		; Don't trust file modes
+//		filemode = false
+//
+//	# Our diff algorithm
+//	[diff]
+//		external = /usr/local/bin/diff-wrapper
+//		renames = true
+//
+//	[branch "devel"]
+//		remote = origin
+//		merge = refs/heads/devel
+//
+//	# Proxy settings
+//	[core]
+//		gitProxy="ssh" for "kernel.org"
+//		gitProxy=default-proxy ; for the rest
+//
+//	[include]
+//		path = /path/to/foo.inc ; include by absolute path
+//		path = foo ; expand "foo" relative to the current file
+//		path = ~/foo ; expand "foo" in your `$HOME` directory
+//
+package config
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go
new file mode 100644
index 0000000000..4eac8968ad
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go
@@ -0,0 +1,77 @@
+package config
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// An Encoder writes config files to an output stream.
+type Encoder struct {
+	w io.Writer
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w}
+}
+
+// Encode writes the config in git config format to the stream of the encoder.
+func (e *Encoder) Encode(cfg *Config) error {
+	for _, s := range cfg.Sections {
+		if err := e.encodeSection(s); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSection(s *Section) error {
+	if len(s.Options) > 0 {
+		if err := e.printf("[%s]\n", s.Name); err != nil {
+			return err
+		}
+
+		if err := e.encodeOptions(s.Options); err != nil {
+			return err
+		}
+	}
+
+	for _, ss := range s.Subsections {
+		if err := e.encodeSubsection(s.Name, ss); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
+	//TODO: escape
+	if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
+		return err
+	}
+
+	return e.encodeOptions(s.Options)
+}
+
+func (e *Encoder) encodeOptions(opts Options) error {
+	for _, o := range opts {
+		pattern := "\t%s = %s\n"
+		if strings.Contains(o.Value, "\\") {
+			pattern = "\t%s = %q\n"
+		}
+
+		if err := e.printf(pattern, o.Key, o.Value); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Encoder) printf(msg string, args ...interface{}) error {
+	_, err := fmt.Fprintf(e.w, msg, args...)
+	return err
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go
new file mode 100644
index 0000000000..cad394810a
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go
@@ -0,0 +1,127 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Option defines a key/value entity in a config file.
+type Option struct {
+	// Key preserving original casing. Use IsKey to compare keys
+	// regardless of case.
+	Key string
+	// Original value as string; it may not be normalized.
+	Value string
+}
+
+type Options []*Option
+
+// IsKey returns true if the given key matches
+// this option's key in a case-insensitive comparison.
+func (o *Option) IsKey(key string) bool {
+	return strings.EqualFold(o.Key, key)
+}
+
+func (opts Options) GoString() string {
+	var strs []string
+	for _, opt := range opts {
+		strs = append(strs, fmt.Sprintf("%#v", opt))
+	}
+
+	return strings.Join(strs, ", ")
+}
+
+// Get gets the value for the given key if set,
+// otherwise it returns the empty string.
+//
+// Note that there is no difference between an empty value
+// and a key that is not set at all; both yield the empty string.
+//
+// This matches git behaviour since git v1.8.1-rc1,
+// if there are multiple definitions of a key, the
+// last one wins.
+//
+// See: http://article.gmane.org/gmane.linux.kernel/1407184
+//
+// In order to get all possible values for the same key,
+// use GetAll.
+func (opts Options) Get(key string) string {
+	for i := len(opts) - 1; i >= 0; i-- {
+		o := opts[i]
+		if o.IsKey(key) {
+			return o.Value
+		}
+	}
+	return ""
+}
+
+// Has checks if an Option exists with the given key.
+func (opts Options) Has(key string) bool {
+	for _, o := range opts {
+		if o.IsKey(key) {
+			return true
+		}
+	}
+	return false
+}
+
+// GetAll returns all possible values for the same key.
+func (opts Options) GetAll(key string) []string {
+	result := []string{}
+	for _, o := range opts {
+		if o.IsKey(key) {
+			result = append(result, o.Value)
+		}
+	}
+	return result
+}
+
+func (opts Options) withoutOption(key string) Options {
+	result := Options{}
+	for _, o := range opts {
+		if !o.IsKey(key) {
+			result = append(result, o)
+		}
+	}
+	return result
+}
+
+func (opts Options) withAddedOption(key string, value string) Options {
+	return append(opts, &Option{key, value})
+}
+
+func (opts Options) withSettedOption(key string, values ...string) Options {
+	var result Options
+	var added []string
+	for _, o := range opts {
+		if !o.IsKey(key) {
+			result = append(result, o)
+			continue
+		}
+
+		if contains(values, o.Value) {
+			added = append(added, o.Value)
+			result = append(result, o)
+			continue
+		}
+	}
+
+	for _, value := range values {
+		if contains(added, value) {
+			continue
+		}
+
+		result = result.withAddedOption(key, value)
+	}
+
+	return result
+}
+
+func contains(haystack []string, needle string) bool {
+	for _, s := range haystack {
+		if s == needle {
+			return true
+		}
+	}
+
+	return false
+}
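// Illustrative sketch (not part of the vendored go-git code): with multiple
// definitions of the same key, Get returns the last value (matching git
// since v1.8.1-rc1, as noted above), while GetAll returns every value in
// order and Has compares keys case-insensitively.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	opts := config.Options{
		{Key: "gitProxy", Value: "ssh"},
		{Key: "gitProxy", Value: "default-proxy"},
	}
	fmt.Println(opts.Get("gitProxy"))    // default-proxy
	fmt.Println(opts.GetAll("gitProxy")) // [ssh default-proxy]
	fmt.Println(opts.Has("gitproxy"))    // true
}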
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go
new file mode 100644
index 0000000000..07f72f35af
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go
@@ -0,0 +1,181 @@
+package config
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Section is the representation of a section inside git configuration files.
+// Each Section contains Options that are used by both the Git plumbing
+// and the porcelains.
+// Sections can be further divided into subsections. To begin a subsection
+// put its name in double quotes, separated by space from the section name,
+// in the section header, like in the example below:
+//
+//	[section "subsection"]
+//
+// All the other lines (and the remainder of the line after the section header)
+// are recognized as option variables, in the form "name = value" (or just name,
+// which is a short-hand to say that the variable is the boolean "true").
+// The variable names are case-insensitive, allow only alphanumeric characters
+// and -, and must start with an alphabetic character:
+//
+//	[section "subsection1"]
+//		option1 = value1
+//		option2
+//	[section "subsection2"]
+//		option3 = value2
+//
+type Section struct {
+	Name        string
+	Options     Options
+	Subsections Subsections
+}
+
+type Subsection struct {
+	Name    string
+	Options Options
+}
+
+type Sections []*Section
+
+func (s Sections) GoString() string {
+	var strs []string
+	for _, ss := range s {
+		strs = append(strs, fmt.Sprintf("%#v", ss))
+	}
+
+	return strings.Join(strs, ", ")
+}
+
+type Subsections []*Subsection
+
+func (s Subsections) GoString() string {
+	var strs []string
+	for _, ss := range s {
+		strs = append(strs, fmt.Sprintf("%#v", ss))
+	}
+
+	return strings.Join(strs, ", ")
+}
+
+// IsName checks whether the provided name is equal to the Section name,
+// ignoring case.
+func (s *Section) IsName(name string) bool {
+	return strings.EqualFold(s.Name, name)
+}
+
+// Subsection returns a Subsection from the specified Section. If the
+// Subsection does not exist, a new one is created and added to the Section.
+func (s *Section) Subsection(name string) *Subsection {
+	for i := len(s.Subsections) - 1; i >= 0; i-- {
+		ss := s.Subsections[i]
+		if ss.IsName(name) {
+			return ss
+		}
+	}
+
+	ss := &Subsection{Name: name}
+	s.Subsections = append(s.Subsections, ss)
+	return ss
+}
+
+// HasSubsection checks if the Section has a Subsection with the specified name.
+func (s *Section) HasSubsection(name string) bool {
+	for _, ss := range s.Subsections {
+		if ss.IsName(name) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// RemoveSubsection removes a subsection from a Section.
+func (s *Section) RemoveSubsection(name string) *Section {
+	result := Subsections{}
+	for _, s := range s.Subsections {
+		if !s.IsName(name) {
+			result = append(result, s)
+		}
+	}
+
+	s.Subsections = result
+	return s
+}
+
+// Option returns the value for the specified key. An empty string is
+// returned if the key does not exist.
+func (s *Section) Option(key string) string {
+	return s.Options.Get(key)
+}
+
+// OptionAll returns all possible values for an option with the specified key.
+// If the option does not exist, an empty slice will be returned.
+func (s *Section) OptionAll(key string) []string {
+	return s.Options.GetAll(key)
+}
+
+// HasOption checks if the Section has an Option with the given key.
+func (s *Section) HasOption(key string) bool {
+	return s.Options.Has(key)
+}
+
+// AddOption adds a new Option to the Section. The updated Section is returned.
+func (s *Section) AddOption(key string, value string) *Section {
+	s.Options = s.Options.withAddedOption(key, value)
+	return s
+}
+
+// SetOption adds a new Option to the Section. If the option already exists,
+// it is replaced. The updated Section is returned.
+func (s *Section) SetOption(key string, value string) *Section {
+	s.Options = s.Options.withSettedOption(key, value)
+	return s
+}
+
+// RemoveOption removes the option with the specified key. The updated Section
+// is returned.
+func (s *Section) RemoveOption(key string) *Section {
+	s.Options = s.Options.withoutOption(key)
+	return s
+}
+
+// IsName checks if the name of the subsection is exactly the specified name.
+func (s *Subsection) IsName(name string) bool {
+	return s.Name == name
+}
+
+// Option returns the value for the option with the specified key. If the
+// option does not exist, an empty string is returned.
+func (s *Subsection) Option(key string) string {
+	return s.Options.Get(key)
+}
+
+// OptionAll returns all possible values for an option with the specified key.
+// If the option does not exist, an empty slice will be returned.
+func (s *Subsection) OptionAll(key string) []string {
+	return s.Options.GetAll(key)
+}
+
+// HasOption checks if the Subsection has an Option with the given key.
+func (s *Subsection) HasOption(key string) bool {
+	return s.Options.Has(key)
+}
+
+// AddOption adds a new Option to the Subsection. The updated Subsection is returned.
+func (s *Subsection) AddOption(key string, value string) *Subsection {
+	s.Options = s.Options.withAddedOption(key, value)
+	return s
+}
+
+// SetOption adds a new Option to the Subsection. If the option already exists,
+// it is replaced. The updated Subsection is returned.
+func (s *Subsection) SetOption(key string, value ...string) *Subsection {
+	s.Options = s.Options.withSettedOption(key, value...)
+	return s
+}
+
+// RemoveOption removes the option with the specified key. The updated Subsection is returned.
+func (s *Subsection) RemoveOption(key string) *Subsection {
+	s.Options = s.Options.withoutOption(key)
+	return s
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go
new file mode 100644
index 0000000000..6fd4158462
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go
@@ -0,0 +1,97 @@
+package diff
+
+import "github.com/go-git/go-git/v5/plumbing/color"
+
+// A ColorKey is a key into a ColorConfig map and also equal to the key in the
+// diff.color subsection of the config. See
+// https://github.com/git/git/blob/v2.26.2/diff.c#L83-L106.
+type ColorKey string
+
+// ColorKeys.
+const (
+	Context                   ColorKey = "context"
+	Meta                      ColorKey = "meta"
+	Frag                      ColorKey = "frag"
+	Old                       ColorKey = "old"
+	New                       ColorKey = "new"
+	Commit                    ColorKey = "commit"
+	Whitespace                ColorKey = "whitespace"
+	Func                      ColorKey = "func"
+	OldMoved                  ColorKey = "oldMoved"
+	OldMovedAlternative       ColorKey = "oldMovedAlternative"
+	OldMovedDimmed            ColorKey = "oldMovedDimmed"
+	OldMovedAlternativeDimmed ColorKey = "oldMovedAlternativeDimmed"
+	NewMoved                  ColorKey = "newMoved"
+	NewMovedAlternative       ColorKey = "newMovedAlternative"
+	NewMovedDimmed            ColorKey = "newMovedDimmed"
+	NewMovedAlternativeDimmed ColorKey = "newMovedAlternativeDimmed"
+	ContextDimmed             ColorKey = "contextDimmed"
+	OldDimmed                 ColorKey = "oldDimmed"
+	NewDimmed                 ColorKey = "newDimmed"
+	ContextBold               ColorKey = "contextBold"
+	OldBold                   ColorKey = "oldBold"
+	NewBold                   ColorKey = "newBold"
+)
+
+// A ColorConfig is a color configuration. A nil or empty ColorConfig
+// corresponds to no color.
+type ColorConfig map[ColorKey]string
+
+// A ColorConfigOption sets an option on a ColorConfig.
+type ColorConfigOption func(ColorConfig)
+
+// WithColor sets the color for key.
+func WithColor(key ColorKey, color string) ColorConfigOption {
+	return func(cc ColorConfig) {
+		cc[key] = color
+	}
+}
+
+// defaultColorConfig is the default color configuration. See
+// https://github.com/git/git/blob/v2.26.2/diff.c#L57-L81.
+var defaultColorConfig = ColorConfig{
+	Context:                   color.Normal,
+	Meta:                      color.Bold,
+	Frag:                      color.Cyan,
+	Old:                       color.Red,
+	New:                       color.Green,
+	Commit:                    color.Yellow,
+	Whitespace:                color.BgRed,
+	Func:                      color.Normal,
+	OldMoved:                  color.BoldMagenta,
+	OldMovedAlternative:       color.BoldBlue,
+	OldMovedDimmed:            color.Faint,
+	OldMovedAlternativeDimmed: color.FaintItalic,
+	NewMoved:                  color.BoldCyan,
+	NewMovedAlternative:       color.BoldYellow,
+	NewMovedDimmed:            color.Faint,
+	NewMovedAlternativeDimmed: color.FaintItalic,
+	ContextDimmed:             color.Faint,
+	OldDimmed:                 color.FaintRed,
+	NewDimmed:                 color.FaintGreen,
+	ContextBold:               color.Bold,
+	OldBold:                   color.BoldRed,
+	NewBold:                   color.BoldGreen,
+}
+
+// NewColorConfig returns a new ColorConfig.
+func NewColorConfig(options ...ColorConfigOption) ColorConfig {
+	cc := make(ColorConfig)
+	for key, value := range defaultColorConfig {
+		cc[key] = value
+	}
+	for _, option := range options {
+		option(cc)
+	}
+	return cc
+}
+
+// Reset returns the ANSI escape sequence to reset the color with key set from
+// cc. If no color was set then no reset is needed so it returns the empty
+// string.
+func (cc ColorConfig) Reset(key ColorKey) string {
+	if cc[key] == "" {
+		return ""
+	}
+	return color.Reset
+}
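// Illustrative sketch (not part of the vendored go-git code): NewColorConfig
// above starts from defaultColorConfig and applies overrides; Reset returns
// the ANSI reset only for keys that actually have a color set.
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/color"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func main() {
	cc := diff.NewColorConfig(
		diff.WithColor(diff.New, color.BoldGreen), // added lines in bold green
	)
	fmt.Printf("%q %q\n", cc[diff.New], cc.Reset(diff.New))

	var plain diff.ColorConfig // nil config: no color, so no reset either
	fmt.Printf("%q\n", plain.Reset(diff.New))
}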
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go
new file mode 100644
index 0000000000..39a66a1a80
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go
@@ -0,0 +1,58 @@
+package diff
+
+import (
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/filemode"
+)
+
+// Operation defines the operation of a diff item.
+type Operation int
+
+const (
+	// Equal item represents an unchanged diff.
+	Equal Operation = iota
+	// Add item represents an insert diff.
+	Add
+	// Delete item represents a delete diff.
+	Delete
+)
+
+// Patch represents a collection of steps to transform several files.
+type Patch interface {
+	// FilePatches returns a slice of patches per file.
+	FilePatches() []FilePatch
+	// Message returns an optional message that can be at the top of the
+	// Patch representation.
+	Message() string
+}
+
+// FilePatch represents the necessary steps to transform one file to another.
+type FilePatch interface {
+	// IsBinary returns true if this patch is representing a binary file.
+	IsBinary() bool
+	// Files returns the from and to Files, with all the necessary metadata
+	// about them. If the patch creates a new file, "from" will be nil.
+	// If the patch deletes a file, "to" will be nil.
+	Files() (from, to File)
+	// Chunks returns a slice of ordered changes to transform "from" File to
+	// "to" File. If the file is a binary one, Chunks will be empty.
+	Chunks() []Chunk
+}
+
+// File contains all the file metadata necessary to print some patch formats.
+type File interface {
+	// Hash returns the File Hash.
+	Hash() plumbing.Hash
+	// Mode returns the FileMode.
+	Mode() filemode.FileMode
+	// Path returns the complete Path to the file, including the filename.
+	Path() string
+}
+
+// Chunk represents a portion of a file transformation to another.
+type Chunk interface {
+	// Content contains the portion of the file.
+	Content() string
+	// Type contains the Operation to do with this Chunk.
+	Type() Operation
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go
new file mode 100644
index 0000000000..413984aa54
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go
@@ -0,0 +1,376 @@
+package diff
+
+import (
+	"fmt"
+	"io"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+// DefaultContextLines is the default number of context lines.
+const DefaultContextLines = 3
+
+var (
+	splitLinesRegexp = regexp.MustCompile(`[^\n]*(\n|$)`)
+
+	operationChar = map[Operation]byte{
+		Add:    '+',
+		Delete: '-',
+		Equal:  ' ',
+	}
+
+	operationColorKey = map[Operation]ColorKey{
+		Add:    New,
+		Delete: Old,
+		Equal:  Context,
+	}
+)
+
+// UnifiedEncoder encodes a unified diff into the provided Writer. It does not
+// support similarity index for renames or sorting hash representations.
+type UnifiedEncoder struct {
+	io.Writer
+
+	// contextLines is the count of unchanged lines that will appear surrounding
+	// a change.
+	contextLines int
+
+	// color is the color configuration. The default is no color.
+	color ColorConfig
+}
+
+// NewUnifiedEncoder returns a new UnifiedEncoder that writes to w.
+func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder {
+	return &UnifiedEncoder{
+		Writer:       w,
+		contextLines: contextLines,
+	}
+}
+
+// SetColor sets e's color configuration and returns e.
+func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder {
+	e.color = colorConfig
+	return e
+}
+
+// Encode encodes patch.
+func (e *UnifiedEncoder) Encode(patch Patch) error {
+	sb := &strings.Builder{}
+
+	if message := patch.Message(); message != "" {
+		sb.WriteString(message)
+		if !strings.HasSuffix(message, "\n") {
+			sb.WriteByte('\n')
+		}
+	}
+
+	for _, filePatch := range patch.FilePatches() {
+		e.writeFilePatchHeader(sb, filePatch)
+		g := newHunksGenerator(filePatch.Chunks(), e.contextLines)
+		for _, hunk := range g.Generate() {
+			hunk.writeTo(sb, e.color)
+		}
+	}
+
+	_, err := e.Write([]byte(sb.String()))
+	return err
+}
+
+func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch FilePatch) {
+	from, to := filePatch.Files()
+	if from == nil && to == nil {
+		return
+	}
+	isBinary := filePatch.IsBinary()
+
+	var lines []string
+	switch {
+	case from != nil && to != nil:
+		hashEquals := from.Hash() == to.Hash()
+		lines = append(lines,
+			fmt.Sprintf("diff --git a/%s b/%s", from.Path(), to.Path()),
+		)
+		if from.Mode() != to.Mode() {
+			lines = append(lines,
+				fmt.Sprintf("old mode %o", from.Mode()),
+				fmt.Sprintf("new mode %o", to.Mode()),
+			)
+		}
+		if from.Path() != to.Path() {
+			lines = append(lines,
+				fmt.Sprintf("rename from %s", from.Path()),
+				fmt.Sprintf("rename to %s", to.Path()),
+			)
+		}
+		if from.Mode() != to.Mode() && !hashEquals {
+			lines = append(lines,
+				fmt.Sprintf("index %s..%s", from.Hash(), to.Hash()),
+			)
+		} else if !hashEquals {
+			lines = append(lines,
+				fmt.Sprintf("index %s..%s %o", from.Hash(), to.Hash(), from.Mode()),
+			)
+		}
+		if !hashEquals {
+			lines = e.appendPathLines(lines, "a/"+from.Path(), "b/"+to.Path(), isBinary)
+		}
+	case from == nil:
+		lines = append(lines,
+			fmt.Sprintf("diff --git a/%s b/%s", to.Path(), to.Path()),
+			fmt.Sprintf("new file mode %o", to.Mode()),
+			fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()),
+		)
+		lines = e.appendPathLines(lines, "/dev/null", "b/"+to.Path(), isBinary)
+	case to == nil:
+		lines = append(lines,
+			fmt.Sprintf("diff --git a/%s b/%s", from.Path(), from.Path()),
+			fmt.Sprintf("deleted file mode %o", from.Mode()),
+			fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash),
+		)
+		lines = e.appendPathLines(lines, "a/"+from.Path(), "/dev/null", isBinary)
+	}
+
+	sb.WriteString(e.color[Meta])
+	sb.WriteString(lines[0])
+	for _, line := range lines[1:] {
+		sb.WriteByte('\n')
+		sb.WriteString(line)
+	}
+	sb.WriteString(e.color.Reset(Meta))
+	sb.WriteByte('\n')
+}
+
+func (e *UnifiedEncoder) appendPathLines(lines []string, fromPath, toPath string, isBinary bool) []string {
+	if isBinary {
+		return append(lines,
+			fmt.Sprintf("Binary files %s and %s differ", fromPath, toPath),
+		)
+	}
+	return append(lines,
+		fmt.Sprintf("--- %s", fromPath),
+		fmt.Sprintf("+++ %s", toPath),
+	)
+}
+
+type hunksGenerator struct {
+	fromLine, toLine            int
+	ctxLines                    int
+	chunks                      []Chunk
+	current                     *hunk
+	hunks                       []*hunk
+	beforeContext, afterContext []string
+}
+
+func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator {
+	return &hunksGenerator{
+		chunks:   chunks,
+		ctxLines: ctxLines,
+	}
+}
+
+func (g *hunksGenerator) Generate() []*hunk {
+	for i, chunk := range g.chunks {
+		lines := splitLines(chunk.Content())
+		nLines := len(lines)
+
+		switch chunk.Type() {
+		case Equal:
+			g.fromLine += nLines
+			g.toLine += nLines
+			g.processEqualsLines(lines, i)
+		case Delete:
+			if nLines != 0 {
+				g.fromLine++
+			}
+
+			g.processHunk(i, chunk.Type())
+			g.fromLine += nLines - 1
+			g.current.AddOp(chunk.Type(), lines...)
+		case Add:
+			if nLines != 0 {
+				g.toLine++
+			}
+			g.processHunk(i, chunk.Type())
+			g.toLine += nLines - 1
+			g.current.AddOp(chunk.Type(), lines...)
+		}
+
+		if i == len(g.chunks)-1 && g.current != nil {
+			g.hunks = append(g.hunks, g.current)
+		}
+	}
+
+	return g.hunks
+}
+
+func (g *hunksGenerator) processHunk(i int, op Operation) {
+	if g.current != nil {
+		return
+	}
+
+	var ctxPrefix string
+	linesBefore := len(g.beforeContext)
+	if linesBefore > g.ctxLines {
+		ctxPrefix = g.beforeContext[linesBefore-g.ctxLines-1]
+		g.beforeContext = g.beforeContext[linesBefore-g.ctxLines:]
+		linesBefore = g.ctxLines
+	}
+
+	g.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")}
+	g.current.AddOp(Equal, g.beforeContext...)
+
+	switch op {
+	case Delete:
+		g.current.fromLine, g.current.toLine =
+			g.addLineNumbers(g.fromLine, g.toLine, linesBefore, i, Add)
+	case Add:
+		g.current.toLine, g.current.fromLine =
+			g.addLineNumbers(g.toLine, g.fromLine, linesBefore, i, Delete)
+	}
+
+	g.beforeContext = nil
+}
+
+// addLineNumbers obtains the line numbers in a new chunk.
+func (g *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) {
+	cla = la - linesBefore
+	// we need to search for a reference for the next diff
+	switch {
+	case linesBefore != 0 && g.ctxLines != 0:
+		if lb > g.ctxLines {
+			clb = lb - g.ctxLines + 1
+		} else {
+			clb = 1
+		}
+	case g.ctxLines == 0:
+		clb = lb
+	case i != len(g.chunks)-1:
+		next := g.chunks[i+1]
+		if next.Type() == op || next.Type() == Equal {
+			// this diff will be into this chunk
+			clb = lb + 1
+		}
+	}
+
+	return
+}
+
+func (g *hunksGenerator) processEqualsLines(ls []string, i int) {
+	if g.current == nil {
+		g.beforeContext = append(g.beforeContext, ls...)
+		return
+	}
+
+	g.afterContext = append(g.afterContext, ls...)
+	if len(g.afterContext) <= g.ctxLines*2 && i != len(g.chunks)-1 {
+		g.current.AddOp(Equal, g.afterContext...)
+		g.afterContext = nil
+	} else {
+		ctxLines := g.ctxLines
+		if ctxLines > len(g.afterContext) {
+			ctxLines = len(g.afterContext)
+		}
+		g.current.AddOp(Equal, g.afterContext[:ctxLines]...)
+		g.hunks = append(g.hunks, g.current)
+
+		g.current = nil
+		g.beforeContext = g.afterContext[ctxLines:]
+		g.afterContext = nil
+	}
+}
+
+func splitLines(s string) []string {
+	out := splitLinesRegexp.FindAllString(s, -1)
+	if out[len(out)-1] == "" {
+		out = out[:len(out)-1]
+	}
+	return out
+}
+
+type hunk struct {
+	fromLine int
+	toLine   int
+
+	fromCount int
+	toCount   int
+
+	ctxPrefix string
+	ops       []*op
+}
+
+func (h *hunk) writeTo(sb *strings.Builder, color ColorConfig) {
+	sb.WriteString(color[Frag])
+	sb.WriteString("@@ -")
+
+	if h.fromCount == 1 {
+		sb.WriteString(strconv.Itoa(h.fromLine))
+	} else {
+		sb.WriteString(strconv.Itoa(h.fromLine))
+		sb.WriteByte(',')
+		sb.WriteString(strconv.Itoa(h.fromCount))
+	}
+
+	sb.WriteString(" +")
+
+	if h.toCount == 1 {
+		sb.WriteString(strconv.Itoa(h.toLine))
+	} else {
+		sb.WriteString(strconv.Itoa(h.toLine))
+		sb.WriteByte(',')
+		sb.WriteString(strconv.Itoa(h.toCount))
+	}
+
+	sb.WriteString(" @@")
+	sb.WriteString(color.Reset(Frag))
+
+	if h.ctxPrefix != "" {
+		sb.WriteByte(' ')
+		sb.WriteString(color[Func])
+		sb.WriteString(h.ctxPrefix)
+		sb.WriteString(color.Reset(Func))
+	}
+
+	sb.WriteByte('\n')
+
+	for _, op := range h.ops {
+		op.writeTo(sb, color)
+	}
+}
+
+func (h *hunk) AddOp(t Operation, ss ...string) {
+	n := len(ss)
+	switch t {
+	case Add:
+		h.toCount += n
+	case Delete:
+		h.fromCount += n
+	case Equal:
+		h.toCount += n
+		h.fromCount += n
+	}
+
+	for _, s := range ss {
+		h.ops = append(h.ops, &op{s, t})
+	}
+}
+
+type op struct {
+	text string
+	t    Operation
+}
+
+func (o *op) writeTo(sb *strings.Builder, color ColorConfig) {
+	colorKey := operationColorKey[o.t]
+	sb.WriteString(color[colorKey])
+	sb.WriteByte(operationChar[o.t])
+	if strings.HasSuffix(o.text, "\n") {
+		sb.WriteString(strings.TrimSuffix(o.text, "\n"))
+	} else {
+		sb.WriteString(o.text + "\n\\ No newline at end of file")
+	}
+	sb.WriteString(color.Reset(colorKey))
+	sb.WriteByte('\n')
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
new file mode 100644
index 0000000000..4a26325f9e
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go
@@ -0,0 +1,136 @@
+package gitignore
+
+import (
+	"bufio"
+	"bytes"
+	"io/ioutil"
+	"os"
+	"os/user"
+	"strings"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-git/v5/plumbing/format/config"
+	gioutil "github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+const (
+	commentPrefix = "#"
+	coreSection   = "core"
+	excludesfile  = "excludesfile"
+	gitDir        = ".git"
+	gitignoreFile = ".gitignore"
+	gitconfigFile = ".gitconfig"
+	systemFile    = "/etc/gitconfig"
+)
+
+// readIgnoreFile reads a specific git ignore file.
+func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) {
+	f, err := fs.Open(fs.Join(append(path, ignoreFile)...))
+	if err == nil {
+		defer f.Close()
+
+		scanner := bufio.NewScanner(f)
+		for scanner.Scan() {
+			s := scanner.Text()
+			if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 {
+				ps = append(ps, ParsePattern(s, path))
+			}
+		}
+	} else if !os.IsNotExist(err) {
+		return nil, err
+	}
+
+	return
+}
The result is in the ascending order of priority (last higher). +func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) { + ps, _ = readIgnoreFile(fs, path, gitignoreFile) + + var fis []os.FileInfo + fis, err = fs.ReadDir(fs.Join(path...)) + if err != nil { + return + } + + for _, fi := range fis { + if fi.IsDir() && fi.Name() != gitDir { + var subps []Pattern + subps, err = ReadPatterns(fs, append(path, fi.Name())) + if err != nil { + return + } + + if len(subps) > 0 { + ps = append(ps, subps...) + } + } + } + + return +} + +func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { + f, err := fs.Open(path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + defer gioutil.CheckClose(f, &err) + + b, err := ioutil.ReadAll(f) + if err != nil { + return + } + + d := config.NewDecoder(bytes.NewBuffer(b)) + + raw := config.New() + if err = d.Decode(raw); err != nil { + return + } + + s := raw.Section(coreSection) + efo := s.Options.Get(excludesfile) + if efo == "" { + return nil, nil + } + + ps, err = readIgnoreFile(fs, nil, efo) + if os.IsNotExist(err) { + return nil, nil + } + + return +} + +// LoadGlobalPatterns loads gitignore patterns from from the gitignore file +// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not +// exist the function will return nil. If the core.excludesfile property +// is not declared, the function will return nil. If the file pointed to by +// the core.excludesfile property does not exist, the function will return nil. +// +// The function assumes fs is rooted at the root filesystem. +func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { + usr, err := user.Current() + if err != nil { + return + } + + return loadPatterns(fs, fs.Join(usr.HomeDir, gitconfigFile)) +} + +// LoadSystemPatterns loads gitignore patterns from from the gitignore file +// declared in a system's /etc/gitconfig file. If the ~/.gitconfig file does +// not exist the function will return nil. If the core.excludesfile property +// is not declared, the function will return nil. If the file pointed to by +// the core.excludesfile property does not exist, the function will return nil. +// +// The function assumes fs is rooted at the root filesystem. +func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) { + return loadPatterns(fs, systemFile) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go new file mode 100644 index 0000000000..eecd4baccb --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go @@ -0,0 +1,70 @@ +// Package gitignore implements matching file system paths to gitignore patterns that +// can be automatically read from a git repository tree in the order of definition +// priorities. It support all pattern formats as specified in the original gitignore +// documentation, copied below: +// +// Pattern format +// ============== +// +// - A blank line matches no files, so it can serve as a separator for readability. +// +// - A line starting with # serves as a comment. Put a backslash ("\") in front of +// the first hash for patterns that begin with a hash. +// +// - Trailing spaces are ignored unless they are quoted with backslash ("\"). +// +// - An optional prefix "!" which negates the pattern; any matching file excluded +// by a previous pattern will become included again. 
It is not possible to +// re-include a file if a parent directory of that file is excluded. +// Git doesn’t list excluded directories for performance reasons, so +// any patterns on contained files have no effect, no matter where they are +// defined. Put a backslash ("\") in front of the first "!" for patterns +// that begin with a literal "!", for example, "\!important!.txt". +// +// - If the pattern ends with a slash, it is removed for the purpose of the +// following description, but it would only find a match with a directory. +// In other words, foo/ will match a directory foo and paths underneath it, +// but will not match a regular file or a symbolic link foo (this is consistent +// with the way how pathspec works in general in Git). +// +// - If the pattern does not contain a slash /, Git treats it as a shell glob +// pattern and checks for a match against the pathname relative to the location +// of the .gitignore file (relative to the toplevel of the work tree if not +// from a .gitignore file). +// +// - Otherwise, Git treats the pattern as a shell glob suitable for consumption +// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will +// not match a / in the pathname. For example, "Documentation/*.html" matches +// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or +// "tools/perf/Documentation/perf.html". +// +// - A leading slash matches the beginning of the pathname. For example, +// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". +// +// Two consecutive asterisks ("**") in patterns matched against full pathname +// may have special meaning: +// +// - A leading "**" followed by a slash means match in all directories. +// For example, "**/foo" matches file or directory "foo" anywhere, the same as +// pattern "foo". "**/foo/bar" matches file or directory "bar" +// anywhere that is directly under directory "foo". +// +// - A trailing "/**" matches everything inside. For example, "abc/**" matches +// all files inside directory "abc", relative to the location of the +// .gitignore file, with infinite depth. +// +// - A slash followed by two consecutive asterisks then a slash matches +// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b", +// "a/x/y/b" and so on. +// +// - Other consecutive asterisks are considered invalid. +// +// Copyright and license +// ===================== +// +// Copyright (c) Oleg Sklyar, Silvertern and source{d} +// +// The package code was donated to source{d} to include, modify and develop +// further as a part of the `go-git` project, release it on the license of +// the whole project or delete it from the project. +package gitignore diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go new file mode 100644 index 0000000000..bd1e9e2d4c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go @@ -0,0 +1,30 @@ +package gitignore + +// Matcher defines a global multi-pattern matcher for gitignore patterns +type Matcher interface { + // Match matches patterns in the order of priorities. As soon as an inclusion or + // exclusion is found, not further matching is performed. + Match(path []string, isDir bool) bool +} + +// NewMatcher constructs a new global matcher. Patterns must be given in the order of +// increasing priority. 
That is most generic settings files first, then the content of +// the repo .gitignore, then content of .gitignore down the path or the repo and then +// the content command line arguments. +func NewMatcher(ps []Pattern) Matcher { + return &matcher{ps} +} + +type matcher struct { + patterns []Pattern +} + +func (m *matcher) Match(path []string, isDir bool) bool { + n := len(m.patterns) + for i := n - 1; i >= 0; i-- { + if match := m.patterns[i].Match(path, isDir); match > NoMatch { + return match == Exclude + } + } + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go new file mode 100644 index 0000000000..098cb50212 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go @@ -0,0 +1,153 @@ +package gitignore + +import ( + "path/filepath" + "strings" +) + +// MatchResult defines outcomes of a match, no match, exclusion or inclusion. +type MatchResult int + +const ( + // NoMatch defines the no match outcome of a match check + NoMatch MatchResult = iota + // Exclude defines an exclusion of a file as a result of a match check + Exclude + // Include defines an explicit inclusion of a file as a result of a match check + Include +) + +const ( + inclusionPrefix = "!" + zeroToManyDirs = "**" + patternDirSep = "/" +) + +// Pattern defines a single gitignore pattern. +type Pattern interface { + // Match matches the given path to the pattern. + Match(path []string, isDir bool) MatchResult +} + +type pattern struct { + domain []string + pattern []string + inclusion bool + dirOnly bool + isGlob bool +} + +// ParsePattern parses a gitignore pattern string into the Pattern structure. +func ParsePattern(p string, domain []string) Pattern { + res := pattern{domain: domain} + + if strings.HasPrefix(p, inclusionPrefix) { + res.inclusion = true + p = p[1:] + } + + if !strings.HasSuffix(p, "\\ ") { + p = strings.TrimRight(p, " ") + } + + if strings.HasSuffix(p, patternDirSep) { + res.dirOnly = true + p = p[:len(p)-1] + } + + if strings.Contains(p, patternDirSep) { + res.isGlob = true + } + + res.pattern = strings.Split(p, patternDirSep) + return &res +} + +func (p *pattern) Match(path []string, isDir bool) MatchResult { + if len(path) <= len(p.domain) { + return NoMatch + } + for i, e := range p.domain { + if path[i] != e { + return NoMatch + } + } + + path = path[len(p.domain):] + if p.isGlob && !p.globMatch(path, isDir) { + return NoMatch + } else if !p.isGlob && !p.simpleNameMatch(path, isDir) { + return NoMatch + } + + if p.inclusion { + return Include + } else { + return Exclude + } +} + +func (p *pattern) simpleNameMatch(path []string, isDir bool) bool { + for i, name := range path { + if match, err := filepath.Match(p.pattern[0], name); err != nil { + return false + } else if !match { + continue + } + if p.dirOnly && !isDir && i == len(path)-1 { + return false + } + return true + } + return false +} + +func (p *pattern) globMatch(path []string, isDir bool) bool { + matched := false + canTraverse := false + for i, pattern := range p.pattern { + if pattern == "" { + canTraverse = false + continue + } + if pattern == zeroToManyDirs { + if i == len(p.pattern)-1 { + break + } + canTraverse = true + continue + } + if strings.Contains(pattern, zeroToManyDirs) { + return false + } + if len(path) == 0 { + return false + } + if canTraverse { + canTraverse = false + for len(path) > 0 { + e := path[0] + path = path[1:] + if match, err := 
filepath.Match(pattern, e); err != nil { + return false + } else if match { + matched = true + break + } else if len(path) == 0 { + // if nothing left then fail + matched = false + } + } + } else { + if match, err := filepath.Match(pattern, path[0]); err != nil || !match { + return false + } + matched = true + path = path[1:] + } + } + if matched && p.dirOnly && !isDir && len(path) == 0 { + matched = false + } + return matched +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go new file mode 100644 index 0000000000..7768bd6505 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go @@ -0,0 +1,177 @@ +package idxfile + +import ( + "bufio" + "bytes" + "errors" + "io" + + "github.com/go-git/go-git/v5/utils/binary" +) + +var ( + // ErrUnsupportedVersion is returned by Decode when the idx file version + // is not supported. + ErrUnsupportedVersion = errors.New("Unsupported version") + // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. + ErrMalformedIdxFile = errors.New("Malformed IDX file") +) + +const ( + fanout = 256 + objectIDLength = 20 +) + +// Decoder reads and decodes idx files from an input stream. +type Decoder struct { + *bufio.Reader +} + +// NewDecoder builds a new idx stream decoder, that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{bufio.NewReader(r)} +} + +// Decode reads from the stream and decode the content into the MemoryIndex struct. +func (d *Decoder) Decode(idx *MemoryIndex) error { + if err := validateHeader(d); err != nil { + return err + } + + flow := []func(*MemoryIndex, io.Reader) error{ + readVersion, + readFanout, + readObjectNames, + readCRC32, + readOffsets, + readChecksums, + } + + for _, f := range flow { + if err := f(idx, d); err != nil { + return err + } + } + + return nil +} + +func validateHeader(r io.Reader) error { + var h = make([]byte, 4) + if _, err := io.ReadFull(r, h); err != nil { + return err + } + + if !bytes.Equal(h, idxHeader) { + return ErrMalformedIdxFile + } + + return nil +} + +func readVersion(idx *MemoryIndex, r io.Reader) error { + v, err := binary.ReadUint32(r) + if err != nil { + return err + } + + if v > VersionSupported { + return ErrUnsupportedVersion + } + + idx.Version = v + return nil +} + +func readFanout(idx *MemoryIndex, r io.Reader) error { + for k := 0; k < fanout; k++ { + n, err := binary.ReadUint32(r) + if err != nil { + return err + } + + idx.Fanout[k] = n + idx.FanoutMapping[k] = noMapping + } + + return nil +} + +func readObjectNames(idx *MemoryIndex, r io.Reader) error { + for k := 0; k < fanout; k++ { + var buckets uint32 + if k == 0 { + buckets = idx.Fanout[k] + } else { + buckets = idx.Fanout[k] - idx.Fanout[k-1] + } + + if buckets == 0 { + continue + } + + idx.FanoutMapping[k] = len(idx.Names) + + nameLen := int(buckets * objectIDLength) + bin := make([]byte, nameLen) + if _, err := io.ReadFull(r, bin); err != nil { + return err + } + + idx.Names = append(idx.Names, bin) + idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4)) + idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4)) + } + + return nil +} + +func readCRC32(idx *MemoryIndex, r io.Reader) error { + for k := 0; k < fanout; k++ { + if pos := idx.FanoutMapping[k]; pos != noMapping { + if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil { + return err + } + } + } + + return nil +} + +func readOffsets(idx *MemoryIndex, r io.Reader) error { + var o64cnt int 
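+	// Offsets whose most-significant bit is set are indexes into the 64-bit
+	// offset table, so count them first in order to size that table before
+	// reading it. (Descriptive note, not upstream.)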
+ for k := 0; k < fanout; k++ { + if pos := idx.FanoutMapping[k]; pos != noMapping { + if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil { + return err + } + + for p := 0; p < len(idx.Offset32[pos]); p += 4 { + if idx.Offset32[pos][p]&(byte(1)<<7) > 0 { + o64cnt++ + } + } + } + } + + if o64cnt > 0 { + idx.Offset64 = make([]byte, o64cnt*8) + if _, err := io.ReadFull(r, idx.Offset64); err != nil { + return err + } + } + + return nil +} + +func readChecksums(idx *MemoryIndex, r io.Reader) error { + if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { + return err + } + + if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go new file mode 100644 index 0000000000..1e628ab4a5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go @@ -0,0 +1,128 @@ +// Package idxfile implements encoding and decoding of packfile idx files. +// +// == Original (version 1) pack-*.idx files have the following format: +// +// - The header consists of 256 4-byte network byte order +// integers. N-th entry of this table records the number of +// objects in the corresponding pack, the first byte of whose +// object name is less than or equal to N. This is called the +// 'first-level fan-out' table. +// +// - The header is followed by sorted 24-byte entries, one entry +// per object in the pack. Each entry is: +// +// 4-byte network byte order integer, recording where the +// object is stored in the packfile as the offset from the +// beginning. +// +// 20-byte object name. +// +// - The file is concluded with a trailer: +// +// A copy of the 20-byte SHA1 checksum at the end of +// corresponding packfile. +// +// 20-byte SHA1-checksum of all of the above. +// +// Pack Idx file: +// +// -- +--------------------------------+ +// fanout | fanout[0] = 2 (for example) |-. +// table +--------------------------------+ | +// | fanout[1] | | +// +--------------------------------+ | +// | fanout[2] | | +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | +// | fanout[255] = total objects |---. +// -- +--------------------------------+ | | +// main | offset | | | +// index | object name 00XXXXXXXXXXXXXXXX | | | +// tab +--------------------------------+ | | +// | offset | | | +// | object name 00XXXXXXXXXXXXXXXX | | | +// +--------------------------------+<+ | +// .-| offset | | +// | | object name 01XXXXXXXXXXXXXXXX | | +// | +--------------------------------+ | +// | | offset | | +// | | object name 01XXXXXXXXXXXXXXXX | | +// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | +// | | offset | | +// | | object name FFXXXXXXXXXXXXXXXX | | +// --| +--------------------------------+<--+ +// trailer | | packfile checksum | +// | +--------------------------------+ +// | | idxfile checksum | +// | +--------------------------------+ +// .---------. +// | +// Pack file entry: <+ +// +// packed object header: +// 1-byte size extension bit (MSB) +// type (next 3 bit) +// size0 (lower 4-bit) +// n-byte sizeN (as long as MSB is set, each 7-bit) +// size0..sizeN form 4+7+7+..+7 bit integer, size0 +// is the least significant part, and sizeN is the +// most significant part. +// packed object data: +// If it is not DELTA, then deflated bytes (the size above +// is the size before compression). +// If it is REF_DELTA, then +// 20-byte base object name SHA1 (the size above is the +// size of the delta data that follows). 
+// delta data, deflated. +// If it is OFS_DELTA, then +// n-byte offset (see below) interpreted as a negative +// offset from the type-byte of the header of the +// ofs-delta entry (the size above is the size of +// the delta data that follows). +// delta data, deflated. +// +// offset encoding: +// n bytes with MSB set in all but the last one. +// The offset is then the number constructed by +// concatenating the lower 7 bit of each byte, and +// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1)) +// to the result. +// +// == Version 2 pack-*.idx files support packs larger than 4 GiB, and +// have some other reorganizations. They have the format: +// +// - A 4-byte magic number '\377tOc' which is an unreasonable +// fanout[0] value. +// +// - A 4-byte version number (= 2) +// +// - A 256-entry fan-out table just like v1. +// +// - A table of sorted 20-byte SHA1 object names. These are +// packed together without offset values to reduce the cache +// footprint of the binary search for a specific object name. +// +// - A table of 4-byte CRC32 values of the packed object data. +// This is new in v2 so compressed data can be copied directly +// from pack to pack during repacking without undetected +// data corruption. +// +// - A table of 4-byte offset values (in network byte order). +// These are usually 31-bit pack file offsets, but large +// offsets are encoded as an index into the next table with +// the msbit set. +// +// - A table of 8-byte offset entries (empty for pack files less +// than 2 GiB). Pack files are organized with heavily used +// objects toward the front, so most object references should +// not need to refer to this table. +// +// - The same trailer as a v1 pack file: +// +// A copy of the 20-byte SHA1 checksum at the end of +// corresponding packfile. +// +// 20-byte SHA1-checksum of all of the above. +// +// Source: +// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt +package idxfile diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go new file mode 100644 index 0000000000..26b2e4d6b5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go @@ -0,0 +1,142 @@ +package idxfile + +import ( + "crypto/sha1" + "hash" + "io" + + "github.com/go-git/go-git/v5/utils/binary" +) + +// Encoder writes MemoryIndex structs to an output stream. +type Encoder struct { + io.Writer + hash hash.Hash +} + +// NewEncoder returns a new stream encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + h := sha1.New() + mw := io.MultiWriter(w, h) + return &Encoder{mw, h} +} + +// Encode encodes an MemoryIndex to the encoder writer. 
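+//
+// Illustrative use, a sketch rather than upstream documentation (the file
+// name is made up; only NewEncoder and Encode are assumed from this package):
+//
+//	f, err := os.Create("pack-example.idx")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	written, err := idxfile.NewEncoder(f).Encode(idx) // idx is a *MemoryIndex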
+func (e *Encoder) Encode(idx *MemoryIndex) (int, error) { + flow := []func(*MemoryIndex) (int, error){ + e.encodeHeader, + e.encodeFanout, + e.encodeHashes, + e.encodeCRC32, + e.encodeOffsets, + e.encodeChecksums, + } + + sz := 0 + for _, f := range flow { + i, err := f(idx) + sz += i + + if err != nil { + return sz, err + } + } + + return sz, nil +} + +func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) { + c, err := e.Write(idxHeader) + if err != nil { + return c, err + } + + return c + 4, binary.WriteUint32(e, idx.Version) +} + +func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) { + for _, c := range idx.Fanout { + if err := binary.WriteUint32(e, c); err != nil { + return 0, err + } + } + + return fanout * 4, nil +} + +func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) { + var size int + for k := 0; k < fanout; k++ { + pos := idx.FanoutMapping[k] + if pos == noMapping { + continue + } + + n, err := e.Write(idx.Names[pos]) + if err != nil { + return size, err + } + size += n + } + return size, nil +} + +func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) { + var size int + for k := 0; k < fanout; k++ { + pos := idx.FanoutMapping[k] + if pos == noMapping { + continue + } + + n, err := e.Write(idx.CRC32[pos]) + if err != nil { + return size, err + } + + size += n + } + + return size, nil +} + +func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) { + var size int + for k := 0; k < fanout; k++ { + pos := idx.FanoutMapping[k] + if pos == noMapping { + continue + } + + n, err := e.Write(idx.Offset32[pos]) + if err != nil { + return size, err + } + + size += n + } + + if len(idx.Offset64) > 0 { + n, err := e.Write(idx.Offset64) + if err != nil { + return size, err + } + + size += n + } + + return size, nil +} + +func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { + if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { + return 0, err + } + + copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) + if _, err := e.Write(idx.IdxChecksum[:]); err != nil { + return 0, err + } + + return 40, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go new file mode 100644 index 0000000000..64dd8dcef4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go @@ -0,0 +1,346 @@ +package idxfile + +import ( + "bytes" + "io" + "sort" + + encbin "encoding/binary" + + "github.com/go-git/go-git/v5/plumbing" +) + +const ( + // VersionSupported is the only idx version supported. + VersionSupported = 2 + + noMapping = -1 +) + +var ( + idxHeader = []byte{255, 't', 'O', 'c'} +) + +// Index represents an index of a packfile. +type Index interface { + // Contains checks whether the given hash is in the index. + Contains(h plumbing.Hash) (bool, error) + // FindOffset finds the offset in the packfile for the object with + // the given hash. + FindOffset(h plumbing.Hash) (int64, error) + // FindCRC32 finds the CRC32 of the object with the given hash. + FindCRC32(h plumbing.Hash) (uint32, error) + // FindHash finds the hash for the object with the given offset. + FindHash(o int64) (plumbing.Hash, error) + // Count returns the number of entries in the index. + Count() (int64, error) + // Entries returns an iterator to retrieve all index entries. + Entries() (EntryIter, error) + // EntriesByOffset returns an iterator to retrieve all index entries ordered + // by offset. 
+ EntriesByOffset() (EntryIter, error) +} + +// MemoryIndex is the in memory representation of an idx file. +type MemoryIndex struct { + Version uint32 + Fanout [256]uint32 + // FanoutMapping maps the position in the fanout table to the position + // in the Names, Offset32 and CRC32 slices. This improves the memory + // usage by not needing an array with unnecessary empty slots. + FanoutMapping [256]int + Names [][]byte + Offset32 [][]byte + CRC32 [][]byte + Offset64 []byte + PackfileChecksum [20]byte + IdxChecksum [20]byte + + offsetHash map[int64]plumbing.Hash + offsetHashIsFull bool +} + +var _ Index = (*MemoryIndex)(nil) + +// NewMemoryIndex returns an instance of a new MemoryIndex. +func NewMemoryIndex() *MemoryIndex { + return &MemoryIndex{} +} + +func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) { + k := idx.FanoutMapping[h[0]] + if k == noMapping { + return 0, false + } + + if len(idx.Names) <= k { + return 0, false + } + + data := idx.Names[k] + high := uint64(len(idx.Offset32[k])) >> 2 + if high == 0 { + return 0, false + } + + low := uint64(0) + for { + mid := (low + high) >> 1 + offset := mid * objectIDLength + + cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength]) + if cmp < 0 { + high = mid + } else if cmp == 0 { + return int(mid), true + } else { + low = mid + 1 + } + + if low >= high { + break + } + } + + return 0, false +} + +// Contains implements the Index interface. +func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) { + _, ok := idx.findHashIndex(h) + return ok, nil +} + +// FindOffset implements the Index interface. +func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { + if len(idx.FanoutMapping) <= int(h[0]) { + return 0, plumbing.ErrObjectNotFound + } + + k := idx.FanoutMapping[h[0]] + i, ok := idx.findHashIndex(h) + if !ok { + return 0, plumbing.ErrObjectNotFound + } + + offset := idx.getOffset(k, i) + + if !idx.offsetHashIsFull { + // Save the offset for reverse lookup + if idx.offsetHash == nil { + idx.offsetHash = make(map[int64]plumbing.Hash) + } + idx.offsetHash[int64(offset)] = h + } + + return int64(offset), nil +} + +const isO64Mask = uint64(1) << 31 + +func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 { + offset := secondLevel << 2 + ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4]) + + if (uint64(ofs) & isO64Mask) != 0 { + offset := 8 * (uint64(ofs) & ^isO64Mask) + n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8]) + return n + } + + return uint64(ofs) +} + +// FindCRC32 implements the Index interface. +func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { + k := idx.FanoutMapping[h[0]] + i, ok := idx.findHashIndex(h) + if !ok { + return 0, plumbing.ErrObjectNotFound + } + + return idx.getCRC32(k, i), nil +} + +func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 { + offset := secondLevel << 2 + return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4]) +} + +// FindHash implements the Index interface. +func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { + var hash plumbing.Hash + var ok bool + + if idx.offsetHash != nil { + if hash, ok = idx.offsetHash[o]; ok { + return hash, nil + } + } + + // Lazily generate the reverse offset/hash map if required. 
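+	// FindOffset may already have cached individual offsets above, but only
+	// a full scan (genOffsetHash) marks the map complete by setting
+	// offsetHashIsFull. (Descriptive note, not upstream.)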
+ if !idx.offsetHashIsFull || idx.offsetHash == nil { + if err := idx.genOffsetHash(); err != nil { + return plumbing.ZeroHash, err + } + + hash, ok = idx.offsetHash[o] + } + + if !ok { + return plumbing.ZeroHash, plumbing.ErrObjectNotFound + } + + return hash, nil +} + +// genOffsetHash generates the offset/hash mapping for reverse search. +func (idx *MemoryIndex) genOffsetHash() error { + count, err := idx.Count() + if err != nil { + return err + } + + idx.offsetHash = make(map[int64]plumbing.Hash, count) + idx.offsetHashIsFull = true + + var hash plumbing.Hash + i := uint32(0) + for firstLevel, fanoutValue := range idx.Fanout { + mappedFirstLevel := idx.FanoutMapping[firstLevel] + for secondLevel := uint32(0); i < fanoutValue; i++ { + copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:]) + offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel))) + idx.offsetHash[offset] = hash + secondLevel++ + } + } + + return nil +} + +// Count implements the Index interface. +func (idx *MemoryIndex) Count() (int64, error) { + return int64(idx.Fanout[fanout-1]), nil +} + +// Entries implements the Index interface. +func (idx *MemoryIndex) Entries() (EntryIter, error) { + return &idxfileEntryIter{idx, 0, 0, 0}, nil +} + +// EntriesByOffset implements the Index interface. +func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) { + count, err := idx.Count() + if err != nil { + return nil, err + } + + iter := &idxfileEntryOffsetIter{ + entries: make(entriesByOffset, count), + } + + entries, err := idx.Entries() + if err != nil { + return nil, err + } + + for pos := 0; int64(pos) < count; pos++ { + entry, err := entries.Next() + if err != nil { + return nil, err + } + + iter.entries[pos] = entry + } + + sort.Sort(iter.entries) + + return iter, nil +} + +// EntryIter is an iterator that will return the entries in a packfile index. +type EntryIter interface { + // Next returns the next entry in the packfile index. + Next() (*Entry, error) + // Close closes the iterator. + Close() error +} + +type idxfileEntryIter struct { + idx *MemoryIndex + total int + firstLevel, secondLevel int +} + +func (i *idxfileEntryIter) Next() (*Entry, error) { + for { + if i.firstLevel >= fanout { + return nil, io.EOF + } + + if i.total >= int(i.idx.Fanout[i.firstLevel]) { + i.firstLevel++ + i.secondLevel = 0 + continue + } + + mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel] + entry := new(Entry) + copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:]) + entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel) + entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel) + + i.secondLevel++ + i.total++ + + return entry, nil + } +} + +func (i *idxfileEntryIter) Close() error { + i.firstLevel = fanout + return nil +} + +// Entry is the in memory representation of an object entry in the idx file. 
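+//
+// Iterating all entries is a matter of calling Next until io.EOF (a minimal
+// sketch, assuming idx is a populated *MemoryIndex):
+//
+//	iter, _ := idx.Entries()
+//	defer iter.Close()
+//	for {
+//		e, err := iter.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		fmt.Printf("%s %d %d\n", e.Hash, e.Offset, e.CRC32)
+//	}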
+type Entry struct { + Hash plumbing.Hash + CRC32 uint32 + Offset uint64 +} + +type idxfileEntryOffsetIter struct { + entries entriesByOffset + pos int +} + +func (i *idxfileEntryOffsetIter) Next() (*Entry, error) { + if i.pos >= len(i.entries) { + return nil, io.EOF + } + + entry := i.entries[i.pos] + i.pos++ + + return entry, nil +} + +func (i *idxfileEntryOffsetIter) Close() error { + i.pos = len(i.entries) + 1 + return nil +} + +type entriesByOffset []*Entry + +func (o entriesByOffset) Len() int { + return len(o) +} + +func (o entriesByOffset) Less(i int, j int) bool { + return o[i].Offset < o[j].Offset +} + +func (o entriesByOffset) Swap(i int, j int) { + o[i], o[j] = o[j], o[i] +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go new file mode 100644 index 0000000000..daa160502a --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go @@ -0,0 +1,186 @@ +package idxfile + +import ( + "bytes" + "fmt" + "math" + "sort" + "sync" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/binary" +) + +// objects implements sort.Interface and uses hash as sorting key. +type objects []Entry + +// Writer implements a packfile Observer interface and is used to generate +// indexes. +type Writer struct { + m sync.Mutex + + count uint32 + checksum plumbing.Hash + objects objects + offset64 uint32 + finished bool + index *MemoryIndex + added map[plumbing.Hash]struct{} +} + +// Index returns a previously created MemoryIndex or creates a new one if +// needed. +func (w *Writer) Index() (*MemoryIndex, error) { + w.m.Lock() + defer w.m.Unlock() + + if w.index == nil { + return w.createIndex() + } + + return w.index, nil +} + +// Add appends new object data. +func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) { + w.m.Lock() + defer w.m.Unlock() + + if w.added == nil { + w.added = make(map[plumbing.Hash]struct{}) + } + + if _, ok := w.added[h]; !ok { + w.added[h] = struct{}{} + w.objects = append(w.objects, Entry{h, crc, pos}) + } + +} + +func (w *Writer) Finished() bool { + return w.finished +} + +// OnHeader implements packfile.Observer interface. +func (w *Writer) OnHeader(count uint32) error { + w.count = count + w.objects = make(objects, 0, count) + return nil +} + +// OnInflatedObjectHeader implements packfile.Observer interface. +func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error { + return nil +} + +// OnInflatedObjectContent implements packfile.Observer interface. +func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error { + w.Add(h, uint64(pos), crc) + return nil +} + +// OnFooter implements packfile.Observer interface. +func (w *Writer) OnFooter(h plumbing.Hash) error { + w.checksum = h + w.finished = true + _, err := w.createIndex() + if err != nil { + return err + } + + return nil +} + +// creatIndex returns a filled MemoryIndex with the information filled by +// the observer callbacks. 
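+//
+// Fanout bookkeeping, worked through (illustrative): with two objects whose
+// hashes start with 0x01 and 0x03, the loop below yields Fanout[0]=0,
+// Fanout[1]=1, Fanout[2]=1 and Fanout[3..255]=2; that is, Fanout[k] counts
+// the objects whose first hash byte is <= k.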
+func (w *Writer) createIndex() (*MemoryIndex, error) { + if !w.finished { + return nil, fmt.Errorf("the index still hasn't finished building") + } + + idx := new(MemoryIndex) + w.index = idx + + sort.Sort(w.objects) + + // unmap all fans by default + for i := range idx.FanoutMapping { + idx.FanoutMapping[i] = noMapping + } + + buf := new(bytes.Buffer) + + last := -1 + bucket := -1 + for i, o := range w.objects { + fan := o.Hash[0] + + // fill the gaps between fans + for j := last + 1; j < int(fan); j++ { + idx.Fanout[j] = uint32(i) + } + + // update the number of objects for this position + idx.Fanout[fan] = uint32(i + 1) + + // we move from one bucket to another, update counters and allocate + // memory + if last != int(fan) { + bucket++ + idx.FanoutMapping[fan] = bucket + last = int(fan) + + idx.Names = append(idx.Names, make([]byte, 0)) + idx.Offset32 = append(idx.Offset32, make([]byte, 0)) + idx.CRC32 = append(idx.CRC32, make([]byte, 0)) + } + + idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...) + + offset := o.Offset + if offset > math.MaxInt32 { + offset = w.addOffset64(offset) + } + + buf.Truncate(0) + binary.WriteUint32(buf, uint32(offset)) + idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) + + buf.Truncate(0) + binary.WriteUint32(buf, o.CRC32) + idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) + } + + for j := last + 1; j < 256; j++ { + idx.Fanout[j] = uint32(len(w.objects)) + } + + idx.Version = VersionSupported + idx.PackfileChecksum = w.checksum + + return idx, nil +} + +func (w *Writer) addOffset64(pos uint64) uint64 { + buf := new(bytes.Buffer) + binary.WriteUint64(buf, pos) + w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) + + index := uint64(w.offset64 | (1 << 31)) + w.offset64++ + + return index +} + +func (o objects) Len() int { + return len(o) +} + +func (o objects) Less(i int, j int) bool { + cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:]) + return cmp < 0 +} + +func (o objects) Swap(i int, j int) { + o[i], o[j] = o[j], o[i] +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go new file mode 100644 index 0000000000..036b6365e6 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go @@ -0,0 +1,479 @@ +package index + +import ( + "bufio" + "bytes" + "crypto/sha1" + "errors" + "hash" + "io" + "io/ioutil" + "strconv" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/binary" +) + +var ( + // DecodeVersionSupported is the range of supported index versions + DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} + + // ErrMalformedSignature is returned by Decode when the index header file is + // malformed + ErrMalformedSignature = errors.New("malformed index signature file") + // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with + // the read content + ErrInvalidChecksum = errors.New("invalid checksum") + + errUnknownExtension = errors.New("unknown extension") +) + +const ( + entryHeaderLength = 62 + entryExtended = 0x4000 + entryValid = 0x8000 + nameMask = 0xfff + intentToAddMask = 1 << 13 + skipWorkTreeMask = 1 << 14 +) + +// A Decoder reads and decodes index files from an input stream. +type Decoder struct { + r io.Reader + hash hash.Hash + lastEntry *Entry + + extReader *bufio.Reader +} + +// NewDecoder returns a new decoder that reads from r. 
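+//
+// A minimal decode sketch (illustrative; the path assumes a standard
+// repository layout):
+//
+//	f, err := os.Open(".git/index")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	var idx index.Index
+//	err = index.NewDecoder(f).Decode(&idx)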
+func NewDecoder(r io.Reader) *Decoder { + h := sha1.New() + return &Decoder{ + r: io.TeeReader(r, h), + hash: h, + extReader: bufio.NewReader(nil), + } +} + +// Decode reads the whole index object from its input and stores it in the +// value pointed to by idx. +func (d *Decoder) Decode(idx *Index) error { + var err error + idx.Version, err = validateHeader(d.r) + if err != nil { + return err + } + + entryCount, err := binary.ReadUint32(d.r) + if err != nil { + return err + } + + if err := d.readEntries(idx, int(entryCount)); err != nil { + return err + } + + return d.readExtensions(idx) +} + +func (d *Decoder) readEntries(idx *Index, count int) error { + for i := 0; i < count; i++ { + e, err := d.readEntry(idx) + if err != nil { + return err + } + + d.lastEntry = e + idx.Entries = append(idx.Entries, e) + } + + return nil +} + +func (d *Decoder) readEntry(idx *Index) (*Entry, error) { + e := &Entry{} + + var msec, mnsec, sec, nsec uint32 + var flags uint16 + + flow := []interface{}{ + &sec, &nsec, + &msec, &mnsec, + &e.Dev, + &e.Inode, + &e.Mode, + &e.UID, + &e.GID, + &e.Size, + &e.Hash, + &flags, + } + + if err := binary.Read(d.r, flow...); err != nil { + return nil, err + } + + read := entryHeaderLength + + if sec != 0 || nsec != 0 { + e.CreatedAt = time.Unix(int64(sec), int64(nsec)) + } + + if msec != 0 || mnsec != 0 { + e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) + } + + e.Stage = Stage(flags>>12) & 0x3 + + if flags&entryExtended != 0 { + extended, err := binary.ReadUint16(d.r) + if err != nil { + return nil, err + } + + read += 2 + e.IntentToAdd = extended&intentToAddMask != 0 + e.SkipWorktree = extended&skipWorkTreeMask != 0 + } + + if err := d.readEntryName(idx, e, flags); err != nil { + return nil, err + } + + return e, d.padEntry(idx, e, read) +} + +func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { + var name string + var err error + + switch idx.Version { + case 2, 3: + len := flags & nameMask + name, err = d.doReadEntryName(len) + case 4: + name, err = d.doReadEntryNameV4() + default: + return ErrUnsupportedVersion + } + + if err != nil { + return err + } + + e.Name = name + return nil +} + +func (d *Decoder) doReadEntryNameV4() (string, error) { + l, err := binary.ReadVariableWidthInt(d.r) + if err != nil { + return "", err + } + + var base string + if d.lastEntry != nil { + base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] + } + + name, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return "", err + } + + return base + string(name), nil +} + +func (d *Decoder) doReadEntryName(len uint16) (string, error) { + name := make([]byte, len) + _, err := io.ReadFull(d.r, name) + + return string(name), err +} + +// Index entries are padded out to the next 8 byte alignment +// for historical reasons related to how C Git read the files. 
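+// For example (illustrative arithmetic): the 62-byte header plus a 7-byte
+// name gives entrySize 69, so padLen = 8 - 69%8 = 3 NUL bytes and the next
+// entry starts at offset 72, a multiple of eight.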
+func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { + if idx.Version == 4 { + return nil + } + + entrySize := read + len(e.Name) + padLen := 8 - entrySize%8 + _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) + return err +} + +func (d *Decoder) readExtensions(idx *Index) error { + // TODO: support 'Split index' and 'Untracked cache' extensions, take in + // count that they are not supported by jgit or libgit + + var expected []byte + var err error + + var header [4]byte + for { + expected = d.hash.Sum(nil) + + var n int + if n, err = io.ReadFull(d.r, header[:]); err != nil { + if n == 0 { + err = io.EOF + } + + break + } + + err = d.readExtension(idx, header[:]) + if err != nil { + break + } + } + + if err != errUnknownExtension { + return err + } + + return d.readChecksum(expected, header) +} + +func (d *Decoder) readExtension(idx *Index, header []byte) error { + switch { + case bytes.Equal(header, treeExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.Cache = &Tree{} + d := &treeExtensionDecoder{r} + if err := d.Decode(idx.Cache); err != nil { + return err + } + case bytes.Equal(header, resolveUndoExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.ResolveUndo = &ResolveUndo{} + d := &resolveUndoDecoder{r} + if err := d.Decode(idx.ResolveUndo); err != nil { + return err + } + case bytes.Equal(header, endOfIndexEntryExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.EndOfIndexEntry = &EndOfIndexEntry{} + d := &endOfIndexEntryDecoder{r} + if err := d.Decode(idx.EndOfIndexEntry); err != nil { + return err + } + default: + return errUnknownExtension + } + + return nil +} + +func (d *Decoder) getExtensionReader() (*bufio.Reader, error) { + len, err := binary.ReadUint32(d.r) + if err != nil { + return nil, err + } + + d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)}) + return d.extReader, nil +} + +func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { + var h plumbing.Hash + copy(h[:4], alreadyRead[:]) + + if _, err := io.ReadFull(d.r, h[4:]); err != nil { + return err + } + + if !bytes.Equal(h[:], expected) { + return ErrInvalidChecksum + } + + return nil +} + +func validateHeader(r io.Reader) (version uint32, err error) { + var s = make([]byte, 4) + if _, err := io.ReadFull(r, s); err != nil { + return 0, err + } + + if !bytes.Equal(s, indexSignature) { + return 0, ErrMalformedSignature + } + + version, err = binary.ReadUint32(r) + if err != nil { + return 0, err + } + + if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { + return 0, ErrUnsupportedVersion + } + + return +} + +type treeExtensionDecoder struct { + r *bufio.Reader +} + +func (d *treeExtensionDecoder) Decode(t *Tree) error { + for { + e, err := d.readEntry() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + if e == nil { + continue + } + + t.Entries = append(t.Entries, *e) + } +} + +func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { + e := &TreeEntry{} + + path, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return nil, err + } + + e.Path = string(path) + + count, err := binary.ReadUntil(d.r, ' ') + if err != nil { + return nil, err + } + + i, err := strconv.Atoi(string(count)) + if err != nil { + return nil, err + } + + // An entry can be in an invalidated state and is represented by having a + // negative number in the entry_count field. 
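+	// Upstream git always writes -1 for an invalidated entry, so only that
+	// value is tested here; such entries carry no object name. (Descriptive
+	// note, not upstream.)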
+ if i == -1 { + return nil, nil + } + + e.Entries = i + trees, err := binary.ReadUntil(d.r, '\n') + if err != nil { + return nil, err + } + + i, err = strconv.Atoi(string(trees)) + if err != nil { + return nil, err + } + + e.Trees = i + _, err = io.ReadFull(d.r, e.Hash[:]) + if err != nil { + return nil, err + } + return e, nil +} + +type resolveUndoDecoder struct { + r *bufio.Reader +} + +func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { + for { + e, err := d.readEntry() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + ru.Entries = append(ru.Entries, *e) + } +} + +func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { + e := &ResolveUndoEntry{ + Stages: make(map[Stage]plumbing.Hash), + } + + path, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return nil, err + } + + e.Path = string(path) + + for i := 0; i < 3; i++ { + if err := d.readStage(e, Stage(i+1)); err != nil { + return nil, err + } + } + + for s := range e.Stages { + var hash plumbing.Hash + if _, err := io.ReadFull(d.r, hash[:]); err != nil { + return nil, err + } + + e.Stages[s] = hash + } + + return e, nil +} + +func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { + ascii, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return err + } + + stage, err := strconv.ParseInt(string(ascii), 8, 64) + if err != nil { + return err + } + + if stage != 0 { + e.Stages[s] = plumbing.ZeroHash + } + + return nil +} + +type endOfIndexEntryDecoder struct { + r *bufio.Reader +} + +func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { + var err error + e.Offset, err = binary.ReadUint32(d.r) + if err != nil { + return err + } + + _, err = io.ReadFull(d.r, e.Hash[:]) + return err +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go new file mode 100644 index 0000000000..39ae6ad5f9 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go @@ -0,0 +1,360 @@ +// Package index implements encoding and decoding of index format files. +// +// Git index format +// ================ +// +// == The Git index file has the following format +// +// All binary numbers are in network byte order. Version 2 is described +// here unless stated otherwise. +// +// - A 12-byte header consisting of +// +// 4-byte signature: +// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") +// +// 4-byte version number: +// The current supported versions are 2, 3 and 4. +// +// 32-bit number of index entries. +// +// - A number of sorted index entries (see below). +// +// - Extensions +// +// Extensions are identified by signature. Optional extensions can +// be ignored if Git does not understand them. +// +// Git currently supports cached tree and resolve undo extensions. +// +// 4-byte extension signature. If the first byte is 'A'..'Z' the +// extension is optional and can be ignored. +// +// 32-bit size of the extension +// +// Extension data +// +// - 160-bit SHA-1 over the content of the index file before this +// checksum. +// +// == Index entry +// +// Index entries are sorted in ascending order on the name field, +// interpreted as a string of unsigned bytes (i.e. memcmp() order, no +// localization, no special casing of directory separator '/'). Entries +// with the same name are sorted by their stage field. 
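+//
+// For example, "foo.bar" sorts before "foo/bar": '.' (0x2E) is compared as a
+// plain byte against '/' (0x2F), with no path-aware ordering applied.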
+// +// 32-bit ctime seconds, the last time a file's metadata changed +// this is stat(2) data +// +// 32-bit ctime nanosecond fractions +// this is stat(2) data +// +// 32-bit mtime seconds, the last time a file's data changed +// this is stat(2) data +// +// 32-bit mtime nanosecond fractions +// this is stat(2) data +// +// 32-bit dev +// this is stat(2) data +// +// 32-bit ino +// this is stat(2) data +// +// 32-bit mode, split into (high to low bits) +// +// 4-bit object type +// valid values in binary are 1000 (regular file), 1010 (symbolic link) +// and 1110 (gitlink) +// +// 3-bit unused +// +// 9-bit unix permission. Only 0755 and 0644 are valid for regular files. +// Symbolic links and gitlinks have value 0 in this field. +// +// 32-bit uid +// this is stat(2) data +// +// 32-bit gid +// this is stat(2) data +// +// 32-bit file size +// This is the on-disk size from stat(2), truncated to 32-bit. +// +// 160-bit SHA-1 for the represented object +// +// A 16-bit 'flags' field split into (high to low bits) +// +// 1-bit assume-valid flag +// +// 1-bit extended flag (must be zero in version 2) +// +// 2-bit stage (during merge) +// +// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF +// is stored in this field. +// +// (Version 3 or later) A 16-bit field, only applicable if the +// "extended flag" above is 1, split into (high to low bits). +// +// 1-bit reserved for future +// +// 1-bit skip-worktree flag (used by sparse checkout) +// +// 1-bit intent-to-add flag (used by "git add -N") +// +// 13-bit unused, must be zero +// +// Entry path name (variable length) relative to top level directory +// (without leading slash). '/' is used as path separator. The special +// path components ".", ".." and ".git" (without quotes) are disallowed. +// Trailing slash is also disallowed. +// +// The exact encoding is undefined, but the '.' and '/' characters +// are encoded in 7-bit ASCII and the encoding cannot contain a NUL +// byte (iow, this is a UNIX pathname). +// +// (Version 4) In version 4, the entry path name is prefix-compressed +// relative to the path name for the previous entry (the very first +// entry is encoded as if the path name for the previous entry is an +// empty string). At the beginning of an entry, an integer N in the +// variable width encoding (the same encoding as the offset is encoded +// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed +// by a NUL-terminated string S. Removing N bytes from the end of the +// path name for the previous entry, and replacing it with the string S +// yields the path name for this entry. +// +// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes +// while keeping the name NUL-terminated. +// +// (Version 4) In version 4, the padding after the pathname does not +// exist. +// +// Interpretation of index entries in split index mode is completely +// different. See below for details. +// +// == Extensions +// +// === Cached tree +// +// Cached tree extension contains pre-computed hashes for trees that can +// be derived from the index. It helps speed up tree object generation +// from index for a new commit. +// +// When a path is updated in index, the path must be invalidated and +// removed from tree cache. +// +// The signature for this extension is { 'T', 'R', 'E', 'E' }. 
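+//
+// As a worked example of the entry layout described below (illustrative):
+// a tree "subdir" covering 42 index entries with 2 subtrees is written as
+// "subdir" NUL "42" SP "2" LF, followed by its 20-byte object name.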
+// +// A series of entries fill the entire extension; each of which +// consists of: +// +// - NUL-terminated path component (relative to its parent directory); +// +// - ASCII decimal number of entries in the index that is covered by the +// tree this entry represents (entry_count); +// +// - A space (ASCII 32); +// +// - ASCII decimal number that represents the number of subtrees this +// tree has; +// +// - A newline (ASCII 10); and +// +// - 160-bit object name for the object that would result from writing +// this span of index as a tree. +// +// An entry can be in an invalidated state and is represented by having +// a negative number in the entry_count field. In this case, there is no +// object name and the next entry starts immediately after the newline. +// When writing an invalid entry, -1 should always be used as entry_count. +// +// The entries are written out in the top-down, depth-first order. The +// first entry represents the root level of the repository, followed by the +// first subtree--let's call this A--of the root level (with its name +// relative to the root level), followed by the first subtree of A (with +// its name relative to A), ... +// +// === Resolve undo +// +// A conflict is represented in the index as a set of higher stage entries. +// When a conflict is resolved (e.g. with "git add path"), these higher +// stage entries will be removed and a stage-0 entry with proper resolution +// is added. +// +// When these higher stage entries are removed, they are saved in the +// resolve undo extension, so that conflicts can be recreated (e.g. with +// "git checkout -m"), in case users want to redo a conflict resolution +// from scratch. +// +// The signature for this extension is { 'R', 'E', 'U', 'C' }. +// +// A series of entries fill the entire extension; each of which +// consists of: +// +// - NUL-terminated pathname the entry describes (relative to the root of +// the repository, i.e. full pathname); +// +// - Three NUL-terminated ASCII octal numbers, entry mode of entries in +// stage 1 to 3 (a missing stage is represented by "0" in this field); +// and +// +// - At most three 160-bit object names of the entry in stages from 1 to 3 +// (nothing is written for a missing stage). +// +// === Split index +// +// In split index mode, the majority of index entries could be stored +// in a separate file. This extension records the changes to be made on +// top of that to produce the final index. +// +// The signature for this extension is { 'l', 'i', 'n', 'k' }. +// +// The extension consists of: +// +// - 160-bit SHA-1 of the shared index file. The shared index file path +// is $GIT_DIR/sharedindex.. If all 160 bits are zero, the +// index does not require a shared index file. +// +// - An ewah-encoded delete bitmap, each bit represents an entry in the +// shared index. If a bit is set, its corresponding entry in the +// shared index will be removed from the final index. Note, because +// a delete operation changes index entry positions, but we do need +// original positions in replace phase, it's best to just mark +// entries for removal, then do a mass deletion after replacement. +// +// - An ewah-encoded replace bitmap, each bit represents an entry in +// the shared index. If a bit is set, its corresponding entry in the +// shared index will be replaced with an entry in this index +// file. All replaced entries are stored in sorted order in this +// index. 
The first "1" bit in the replace bitmap corresponds to the +// first index entry, the second "1" bit to the second entry and so +// on. Replaced entries may have empty path names to save space. +// +// The remaining index entries after replaced ones will be added to the +// final index. These added entries are also sorted by entry name then +// stage. +// +// == Untracked cache +// +// Untracked cache saves the untracked file list and necessary data to +// verify the cache. The signature for this extension is { 'U', 'N', +// 'T', 'R' }. +// +// The extension starts with +// +// - A sequence of NUL-terminated strings, preceded by the size of the +// sequence in variable width encoding. Each string describes the +// environment where the cache can be used. +// +// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from +// ctime field until "file size". +// +// - Stat data of plumbing.excludesfile +// +// - 32-bit dir_flags (see struct dir_struct) +// +// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file +// does not exist. +// +// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does +// not exist. +// +// - NUL-terminated string of per-dir exclude file name. This usually +// is ".gitignore". +// +// - The number of following directory blocks, variable width +// encoding. If this number is zero, the extension ends here with a +// following NUL. +// +// - A number of directory blocks in depth-first-search order, each +// consists of +// +// - The number of untracked entries, variable width encoding. +// +// - The number of sub-directory blocks, variable width encoding. +// +// - The directory name terminated by NUL. +// +// - A number of untracked file/dir names terminated by NUL. +// +// The remaining data of each directory block is grouped by type: +// +// - An ewah bitmap, the n-th bit marks whether the n-th directory has +// valid untracked cache entries. +// +// - An ewah bitmap, the n-th bit records "check-only" bit of +// read_directory_recursive() for the n-th directory. +// +// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data +// is valid for the n-th directory and exists in the next data. +// +// - An array of stat data. The n-th data corresponds with the n-th +// "one" bit in the previous ewah bitmap. +// +// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit +// in the previous ewah bitmap. +// +// - One NUL. +// +// == File System Monitor cache +// +// The file system monitor cache tracks files for which the core.fsmonitor +// hook has told us about changes. The signature for this extension is +// { 'F', 'S', 'M', 'N' }. +// +// The extension starts with +// +// - 32-bit version number: the current supported version is 1. +// +// - 64-bit time: the extension data reflects all changes through the given +// time which is stored as the nanoseconds elapsed since midnight, +// January 1, 1970. +// +// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap. +// +// - An ewah bitmap, the n-th bit indicates whether the n-th index entry +// is not CE_FSMONITOR_VALID. +// +// == End of Index Entry +// +// The End of Index Entry (EOIE) is used to locate the end of the variable +// length index entries and the beginning of the extensions. Code can take +// advantage of this to quickly locate the index extensions without having +// to parse through all of the index entries. 
+// +// Because it must be able to be loaded before the variable length cache +// entries and other index extensions, this extension must be written last. +// The signature for this extension is { 'E', 'O', 'I', 'E' }. +// +// The extension consists of: +// +// - 32-bit offset to the end of the index entries +// +// - 160-bit SHA-1 over the extension types and their sizes (but not +// their contents). E.g. if we have "TREE" extension that is N-bytes +// long, "REUC" extension that is M-bytes long, followed by "EOIE", +// then the hash would be: +// +// SHA-1("TREE" + + +// "REUC" + ) +// +// == Index Entry Offset Table +// +// The Index Entry Offset Table (IEOT) is used to help address the CPU +// cost of loading the index by enabling multi-threading the process of +// converting cache entries from the on-disk format to the in-memory format. +// The signature for this extension is { 'I', 'E', 'O', 'T' }. +// +// The extension consists of: +// +// - 32-bit version (currently 1) +// +// - A number of index offset entries each consisting of: +// +// - 32-bit offset from the beginning of the file to the first cache entry +// in this block of entries. +// +// - 32-bit count of cache entries in this blockpackage index +package index diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go new file mode 100644 index 0000000000..00d4e7a317 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go @@ -0,0 +1,150 @@ +package index + +import ( + "bytes" + "crypto/sha1" + "errors" + "hash" + "io" + "sort" + "time" + + "github.com/go-git/go-git/v5/utils/binary" +) + +var ( + // EncodeVersionSupported is the range of supported index versions + EncodeVersionSupported uint32 = 2 + + // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with + // negative timestamp values + ErrInvalidTimestamp = errors.New("negative timestamps are not allowed") +) + +// An Encoder writes an Index to an output stream. +type Encoder struct { + w io.Writer + hash hash.Hash +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + h := sha1.New() + mw := io.MultiWriter(w, h) + return &Encoder{mw, h} +} + +// Encode writes the Index to the stream of the encoder. 
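+//
+// A round-trip sketch (illustrative; assumes idx.Version == 2, the only
+// version this encoder supports):
+//
+//	var buf bytes.Buffer
+//	if err := index.NewEncoder(&buf).Encode(&idx); err != nil {
+//		return err
+//	}
+//	var decoded index.Index
+//	err := index.NewDecoder(&buf).Decode(&decoded)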
+func (e *Encoder) Encode(idx *Index) error { + // TODO: support versions v3 and v4 + // TODO: support extensions + if idx.Version != EncodeVersionSupported { + return ErrUnsupportedVersion + } + + if err := e.encodeHeader(idx); err != nil { + return err + } + + if err := e.encodeEntries(idx); err != nil { + return err + } + + return e.encodeFooter() +} + +func (e *Encoder) encodeHeader(idx *Index) error { + return binary.Write(e.w, + indexSignature, + idx.Version, + uint32(len(idx.Entries)), + ) +} + +func (e *Encoder) encodeEntries(idx *Index) error { + sort.Sort(byName(idx.Entries)) + + for _, entry := range idx.Entries { + if err := e.encodeEntry(entry); err != nil { + return err + } + + wrote := entryHeaderLength + len(entry.Name) + if err := e.padEntry(wrote); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeEntry(entry *Entry) error { + if entry.IntentToAdd || entry.SkipWorktree { + return ErrUnsupportedVersion + } + + sec, nsec, err := e.timeToUint32(&entry.CreatedAt) + if err != nil { + return err + } + + msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) + if err != nil { + return err + } + + flags := uint16(entry.Stage&0x3) << 12 + if l := len(entry.Name); l < nameMask { + flags |= uint16(l) + } else { + flags |= nameMask + } + + flow := []interface{}{ + sec, nsec, + msec, mnsec, + entry.Dev, + entry.Inode, + entry.Mode, + entry.UID, + entry.GID, + entry.Size, + entry.Hash[:], + flags, + } + + if err := binary.Write(e.w, flow...); err != nil { + return err + } + + return binary.Write(e.w, []byte(entry.Name)) +} + +func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { + if t.IsZero() { + return 0, 0, nil + } + + if t.Unix() < 0 || t.UnixNano() < 0 { + return 0, 0, ErrInvalidTimestamp + } + + return uint32(t.Unix()), uint32(t.Nanosecond()), nil +} + +func (e *Encoder) padEntry(wrote int) error { + padLen := 8 - wrote%8 + + _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) + return err +} + +func (e *Encoder) encodeFooter() error { + return binary.Write(e.w, e.hash.Sum(nil)) +} + +type byName []*Entry + +func (l byName) Len() int { return len(l) } +func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go new file mode 100644 index 0000000000..649416a2b4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go @@ -0,0 +1,213 @@ +package index + +import ( + "bytes" + "errors" + "fmt" + "path/filepath" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" +) + +var ( + // ErrUnsupportedVersion is returned by Decode when the index file version + // is not supported. + ErrUnsupportedVersion = errors.New("unsupported version") + // ErrEntryNotFound is returned by Index.Entry, if an entry is not found. 
+ ErrEntryNotFound = errors.New("entry not found") + + indexSignature = []byte{'D', 'I', 'R', 'C'} + treeExtSignature = []byte{'T', 'R', 'E', 'E'} + resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'} + endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'} +) + +// Stage during merge +type Stage int + +const ( + // Merged is the default stage, fully merged + Merged Stage = 1 + // AncestorMode is the base revision + AncestorMode Stage = 1 + // OurMode is the first tree revision, ours + OurMode Stage = 2 + // TheirMode is the second tree revision, theirs + TheirMode Stage = 3 +) + +// Index contains the information about which objects are currently checked out +// in the worktree, having information about the working files. Changes in +// worktree are detected using this Index. The Index is also used during merges +type Index struct { + // Version is index version + Version uint32 + // Entries collection of entries represented by this Index. The order of + // this collection is not guaranteed + Entries []*Entry + // Cache represents the 'Cached tree' extension + Cache *Tree + // ResolveUndo represents the 'Resolve undo' extension + ResolveUndo *ResolveUndo + // EndOfIndexEntry represents the 'End of Index Entry' extension + EndOfIndexEntry *EndOfIndexEntry +} + +// Add creates a new Entry and returns it. The caller should first check that +// another entry with the same path does not exist. +func (i *Index) Add(path string) *Entry { + e := &Entry{ + Name: filepath.ToSlash(path), + } + + i.Entries = append(i.Entries, e) + return e +} + +// Entry returns the entry that match the given path, if any. +func (i *Index) Entry(path string) (*Entry, error) { + path = filepath.ToSlash(path) + for _, e := range i.Entries { + if e.Name == path { + return e, nil + } + } + + return nil, ErrEntryNotFound +} + +// Remove remove the entry that match the give path and returns deleted entry. +func (i *Index) Remove(path string) (*Entry, error) { + path = filepath.ToSlash(path) + for index, e := range i.Entries { + if e.Name == path { + i.Entries = append(i.Entries[:index], i.Entries[index+1:]...) + return e, nil + } + } + + return nil, ErrEntryNotFound +} + +// Glob returns the all entries matching pattern or nil if there is no matching +// entry. The syntax of patterns is the same as in filepath.Glob. +func (i *Index) Glob(pattern string) (matches []*Entry, err error) { + pattern = filepath.ToSlash(pattern) + for _, e := range i.Entries { + m, err := match(pattern, e.Name) + if err != nil { + return nil, err + } + + if m { + matches = append(matches, e) + } + } + + return +} + +// String is equivalent to `git ls-files --stage --debug` +func (i *Index) String() string { + buf := bytes.NewBuffer(nil) + for _, e := range i.Entries { + buf.WriteString(e.String()) + } + + return buf.String() +} + +// Entry represents a single file (or stage of a file) in the cache. An entry +// represents exactly one stage of a file. If a file path is unmerged then +// multiple Entry instances may appear for the same path name. 
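+//
+// For illustration (hypothetical data; baseBlob, oursBlob and theirsBlob
+// stand for the three blob hashes), a conflicted "main.go" can appear
+// three times, once per merge stage:
+//
+//	{Name: "main.go", Stage: AncestorMode, Hash: baseBlob}
+//	{Name: "main.go", Stage: OurMode, Hash: oursBlob}
+//	{Name: "main.go", Stage: TheirMode, Hash: theirsBlob}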
+type Entry struct { + // Hash is the SHA1 of the represented file + Hash plumbing.Hash + // Name is the Entry path name relative to top level directory + Name string + // CreatedAt time when the tracked path was created + CreatedAt time.Time + // ModifiedAt time when the tracked path was changed + ModifiedAt time.Time + // Dev and Inode of the tracked path + Dev, Inode uint32 + // Mode of the path + Mode filemode.FileMode + // UID and GID, userid and group id of the owner + UID, GID uint32 + // Size is the length in bytes for regular files + Size uint32 + // Stage on a merge is defines what stage is representing this entry + // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging + Stage Stage + // SkipWorktree used in sparse checkouts + // https://git-scm.com/docs/git-read-tree#_sparse_checkout + SkipWorktree bool + // IntentToAdd record only the fact that the path will be added later + // https://git-scm.com/docs/git-add ("git add -N") + IntentToAdd bool +} + +func (e Entry) String() string { + buf := bytes.NewBuffer(nil) + + fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name) + fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond()) + fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond()) + fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode) + fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID) + fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0) + + return buf.String() +} + +// Tree contains pre-computed hashes for trees that can be derived from the +// index. It helps speed up tree object generation from index for a new commit. +type Tree struct { + Entries []TreeEntry +} + +// TreeEntry entry of a cached Tree +type TreeEntry struct { + // Path component (relative to its parent directory) + Path string + // Entries is the number of entries in the index that is covered by the tree + // this entry represents. + Entries int + // Trees is the number that represents the number of subtrees this tree has + Trees int + // Hash object name for the object that would result from writing this span + // of index as a tree. + Hash plumbing.Hash +} + +// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"), +// these higher stage entries are removed and a stage-0 entry with proper +// resolution is added. When these higher stage entries are removed, they are +// saved in the resolve undo extension. +type ResolveUndo struct { + Entries []ResolveUndoEntry +} + +// ResolveUndoEntry contains the information about a conflict when is resolved +type ResolveUndoEntry struct { + Path string + Stages map[Stage]plumbing.Hash +} + +// EndOfIndexEntry is the End of Index Entry (EOIE) is used to locate the end of +// the variable length index entries and the beginning of the extensions. Code +// can take advantage of this to quickly locate the index extensions without +// having to parse through all of the index entries. +// +// Because it must be able to be loaded before the variable length cache +// entries and other index extensions, this extension must be written last. +type EndOfIndexEntry struct { + // Offset to the end of the index entries + Offset uint32 + // Hash is a SHA-1 over the extension types and their sizes (but not + // their contents). 
+ Hash plumbing.Hash +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go new file mode 100644 index 0000000000..2891d7d34c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go @@ -0,0 +1,186 @@ +package index + +import ( + "path/filepath" + "runtime" + "unicode/utf8" +) + +// match is filepath.Match with support to match fullpath and not only filenames +// code from: +// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224 +func match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + // Cannot skip /. + for i := 0; i < len(name); i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. +func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + if runtime.GOOS != "windows" { + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // We can't end right after '[', we're expecting at least + // a closing bracket and possibly a caret. 
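+ // For example (illustrative), given the chunk "[^a-c]x" this case
+ // consumes the class "[^a-c]" and matches any rune outside 'a'..'c';
+ // the literal 'x' that follows is compared by the default case below.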
+ if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + // possibly negated + negated := chunk[0] == '^' + if negated { + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + return + } + + case '?': + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + if runtime.GOOS != "windows" { + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = filepath.ErrBadPattern + return + } + if chunk[0] == '\\' && runtime.GOOS != "windows" { + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = filepath.ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = filepath.ErrBadPattern + } + return +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go new file mode 100644 index 0000000000..a7145160ae --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go @@ -0,0 +1,2 @@ +// Package objfile implements encoding and decoding of object files. +package objfile diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go new file mode 100644 index 0000000000..b6b2ca06dd --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go @@ -0,0 +1,114 @@ +package objfile + +import ( + "compress/zlib" + "errors" + "io" + "strconv" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/packfile" +) + +var ( + ErrClosed = errors.New("objfile: already closed") + ErrHeader = errors.New("objfile: invalid header") + ErrNegativeSize = errors.New("objfile: negative object size") +) + +// Reader reads and decodes compressed objfile data from a provided io.Reader. +// Reader implements io.ReadCloser. Close should be called when finished with +// the Reader. Close will not close the underlying io.Reader. +type Reader struct { + multi io.Reader + zlib io.ReadCloser + hasher plumbing.Hasher +} + +// NewReader returns a new Reader reading from r. 
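+//
+// A hedged usage sketch (f is assumed to be an io.Reader over a
+// zlib-compressed loose object; error handling elided):
+//
+//	r, _ := NewReader(f)
+//	t, size, _ := r.Header()  // e.g. plumbing.BlobObject, 42
+//	data, _ := io.ReadAll(r)  // inflated object payload
+//	sum := r.Hash()           // SHA-1 of "<type> <size>\x00" plus payload
+//	_ = r.Close()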
+func NewReader(r io.Reader) (*Reader, error) { + zlib, err := zlib.NewReader(r) + if err != nil { + return nil, packfile.ErrZLib.AddDetails(err.Error()) + } + + return &Reader{ + zlib: zlib, + }, nil +} + +// Header reads the type and the size of object, and prepares the reader for read +func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) { + var raw []byte + raw, err = r.readUntil(' ') + if err != nil { + return + } + + t, err = plumbing.ParseObjectType(string(raw)) + if err != nil { + return + } + + raw, err = r.readUntil(0) + if err != nil { + return + } + + size, err = strconv.ParseInt(string(raw), 10, 64) + if err != nil { + err = ErrHeader + return + } + + defer r.prepareForRead(t, size) + return +} + +// readSlice reads one byte at a time from r until it encounters delim or an +// error. +func (r *Reader) readUntil(delim byte) ([]byte, error) { + var buf [1]byte + value := make([]byte, 0, 16) + for { + if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) { + if err == io.EOF { + return nil, ErrHeader + } + return nil, err + } + + if buf[0] == delim { + return value, nil + } + + value = append(value, buf[0]) + } +} + +func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) { + r.hasher = plumbing.NewHasher(t, size) + r.multi = io.TeeReader(r.zlib, r.hasher) +} + +// Read reads len(p) bytes into p from the object data stream. It returns +// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even +// if Read returns n < len(p), it may use all of p as scratch space during the +// call. +// +// If Read encounters the end of the data stream it will return err == io.EOF, +// either in the current call if n > 0 or in a subsequent call. +func (r *Reader) Read(p []byte) (n int, err error) { + return r.multi.Read(p) +} + +// Hash returns the hash of the object data stream that has been read so far. +func (r *Reader) Hash() plumbing.Hash { + return r.hasher.Sum() +} + +// Close releases any resources consumed by the Reader. Calling Close does not +// close the wrapped io.Reader originally passed to NewReader. +func (r *Reader) Close() error { + return r.zlib.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go new file mode 100644 index 0000000000..2a96a4370b --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go @@ -0,0 +1,109 @@ +package objfile + +import ( + "compress/zlib" + "errors" + "io" + "strconv" + + "github.com/go-git/go-git/v5/plumbing" +) + +var ( + ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)") +) + +// Writer writes and encodes data in compressed objfile format to a provided +// io.Writer. Close should be called when finished with the Writer. Close will +// not close the underlying io.Writer. +type Writer struct { + raw io.Writer + zlib io.WriteCloser + hasher plumbing.Hasher + multi io.Writer + + closed bool + pending int64 // number of unwritten bytes +} + +// NewWriter returns a new Writer writing to w. +// +// The returned Writer implements io.WriteCloser. Close should be called when +// finished with the Writer. Close will not close the underlying io.Writer. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + raw: w, + zlib: zlib.NewWriter(w), + } +} + +// WriteHeader writes the type and the size and prepares to accept the object's +// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. 
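+// The encoded header is written through the zlib stream and has the form
+// "<type> <size>" followed by a NUL byte; a 12-byte blob, for example,
+// yields "blob 12\x00" (illustrative).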
If a +// negative size is provided, ErrNegativeSize is returned. +func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { + if !t.Valid() { + return plumbing.ErrInvalidType + } + if size < 0 { + return ErrNegativeSize + } + + b := t.Bytes() + b = append(b, ' ') + b = append(b, []byte(strconv.FormatInt(size, 10))...) + b = append(b, 0) + + defer w.prepareForWrite(t, size) + _, err := w.zlib.Write(b) + + return err +} + +func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { + w.pending = size + + w.hasher = plumbing.NewHasher(t, size) + w.multi = io.MultiWriter(w.zlib, w.hasher) +} + +// Write writes the object's contents. Write returns the error ErrOverflow if +// more than size bytes are written after WriteHeader. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, ErrClosed + } + + overwrite := false + if int64(len(p)) > w.pending { + p = p[0:w.pending] + overwrite = true + } + + n, err = w.multi.Write(p) + w.pending -= int64(n) + if err == nil && overwrite { + err = ErrOverflow + return + } + + return +} + +// Hash returns the hash of the object data stream that has been written so far. +// It can be called before or after Close. +func (w *Writer) Hash() plumbing.Hash { + return w.hasher.Sum() // Not yet closed, return hash of data written so far +} + +// Close releases any resources consumed by the Writer. +// +// Calling Close does not close the wrapped io.Writer originally passed to +// NewWriter. +func (w *Writer) Close() error { + if err := w.zlib.Close(); err != nil { + return err + } + + w.closed = true + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go new file mode 100644 index 0000000000..df423ad50c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go @@ -0,0 +1,78 @@ +package packfile + +import ( + "bytes" + "compress/zlib" + "io" + "sync" + + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var signature = []byte{'P', 'A', 'C', 'K'} + +const ( + // VersionSupported is the packfile version supported by this package + VersionSupported uint32 = 2 + + firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length + lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length + maskFirstLength = 15 // 0000 1111 + maskContinue = 0x80 // 1000 0000 + maskLength = uint8(127) // 0111 1111 + maskType = uint8(112) // 0111 0000 +) + +// UpdateObjectStorage updates the storer with the objects in the given +// packfile. +func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { + if pw, ok := s.(storer.PackfileWriter); ok { + return WritePackfileToObjectStorage(pw, packfile) + } + + p, err := NewParserWithStorage(NewScanner(packfile), s) + if err != nil { + return err + } + + _, err = p.Parse() + return err +} + +// WritePackfileToObjectStorage writes all the packfile objects into the given +// object storage. 
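+//
+// A hedged sketch (pw is assumed to implement storer.PackfileWriter and r
+// to stream one complete packfile; error handling elided):
+//
+//	err := WritePackfileToObjectStorage(pw, r)
+//
+// The pack bytes are copied verbatim into the storage's packfile writer;
+// an empty stream yields ErrEmptyPackfile.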
+func WritePackfileToObjectStorage( + sw storer.PackfileWriter, + packfile io.Reader, +) (err error) { + w, err := sw.PackfileWriter() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + var n int64 + n, err = io.Copy(w, packfile) + if err == nil && n == 0 { + return ErrEmptyPackfile + } + + return err +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(nil) + }, +} + +var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} + +var zlibReaderPool = sync.Pool{ + New: func() interface{} { + r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) + return r + }, +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go new file mode 100644 index 0000000000..07a61120e5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go @@ -0,0 +1,297 @@ +package packfile + +const blksz = 16 +const maxChainLength = 64 + +// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current +// design. +type deltaIndex struct { + table []int + entries []int + mask int +} + +func (idx *deltaIndex) init(buf []byte) { + scanner := newDeltaIndexScanner(buf, len(buf)) + idx.mask = scanner.mask + idx.table = scanner.table + idx.entries = make([]int, countEntries(scanner)+1) + idx.copyEntries(scanner) +} + +// findMatch returns the offset of src where the block starting at tgtOffset +// is and the length of the match. A length of 0 means there was no match. A +// length of -1 means the src length is lower than the blksz and whatever +// other positive length is the length of the match in bytes. +func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) { + if len(tgt) < tgtOffset+s { + return 0, len(tgt) - tgtOffset + } + + if len(src) < blksz { + return 0, -1 + } + + if len(tgt) >= tgtOffset+s && len(src) >= blksz { + h := hashBlock(tgt, tgtOffset) + tIdx := h & idx.mask + eIdx := idx.table[tIdx] + if eIdx != 0 { + srcOffset = idx.entries[eIdx] + } else { + return + } + + l = matchLength(src, tgt, tgtOffset, srcOffset) + } + + return +} + +func matchLength(src, tgt []byte, otgt, osrc int) (l int) { + lensrc := len(src) + lentgt := len(tgt) + for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] { + l++ + osrc++ + otgt++ + } + return +} + +func countEntries(scan *deltaIndexScanner) (cnt int) { + // Figure out exactly how many entries we need. As we do the + // enumeration truncate any delta chains longer than what we + // are willing to scan during encode. This keeps the encode + // logic linear in the size of the input rather than quadratic. + for i := 0; i < len(scan.table); i++ { + h := scan.table[i] + if h == 0 { + continue + } + + size := 0 + for { + size++ + if size == maxChainLength { + scan.next[h] = 0 + break + } + h = scan.next[h] + + if h == 0 { + break + } + } + cnt += size + } + + return +} + +func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) { + // Rebuild the entries list from the scanner, positioning all + // blocks in the same hash chain next to each other. We can + // then later discard the next list, along with the scanner. 
+ // + next := 1 + for i := 0; i < len(idx.table); i++ { + h := idx.table[i] + if h == 0 { + continue + } + + idx.table[i] = next + for { + idx.entries[next] = scanner.entries[h] + next++ + h = scanner.next[h] + + if h == 0 { + break + } + } + } +} + +type deltaIndexScanner struct { + table []int + entries []int + next []int + mask int + count int +} + +func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner { + size -= size % blksz + worstCaseBlockCnt := size / blksz + if worstCaseBlockCnt < 1 { + return new(deltaIndexScanner) + } + + tableSize := tableSize(worstCaseBlockCnt) + scanner := &deltaIndexScanner{ + table: make([]int, tableSize), + mask: tableSize - 1, + entries: make([]int, worstCaseBlockCnt+1), + next: make([]int, worstCaseBlockCnt+1), + } + + scanner.scan(buf, size) + return scanner +} + +// slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries +// instead of the entries and the key, so we avoid operations to retrieve the offset later, as +// we don't use the key. +// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java +func (s *deltaIndexScanner) scan(buf []byte, end int) { + lastHash := 0 + ptr := end - blksz + + for { + key := hashBlock(buf, ptr) + tIdx := key & s.mask + head := s.table[tIdx] + if head != 0 && lastHash == key { + s.entries[head] = ptr + } else { + s.count++ + eIdx := s.count + s.entries[eIdx] = ptr + s.next[eIdx] = head + s.table[tIdx] = eIdx + } + + lastHash = key + ptr -= blksz + + if 0 > ptr { + break + } + } +} + +func tableSize(worstCaseBlockCnt int) int { + shift := 32 - leadingZeros(uint32(worstCaseBlockCnt)) + sz := 1 << uint(shift-1) + if sz < worstCaseBlockCnt { + sz <<= 1 + } + return sz +} + +// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future +func leadingZeros(x uint32) (n int) { + if x >= 1<<16 { + x >>= 16 + n = 16 + } + if x >= 1<<8 { + x >>= 8 + n += 8 + } + n += int(len8tab[x]) + return 32 - n +} + +var len8tab = [256]uint8{ + 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 
0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, +} + +func hashBlock(raw []byte, ptr int) int { + // The first 4 steps collapse out into a 4 byte big-endian decode, + // with a larger right shift as we combined shift lefts together. + // + hash := ((uint32(raw[ptr]) & 0xff) << 24) | + ((uint32(raw[ptr+1]) & 0xff) << 16) | + ((uint32(raw[ptr+2]) & 0xff) << 8) | + (uint32(raw[ptr+3]) & 0xff) + hash ^= T[hash>>31] + + hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23] + + hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23] + + hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23] + + return int(hash) +} + +var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577, + 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99, + 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45, + 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c, + 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895, + 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd, + 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f, + 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181, + 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e, + 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770, + 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d, + 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5, + 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c, + 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084, + 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558, + 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6, + 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788, + 0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66, + 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba, + 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c, + 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105, + 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d, + 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990, + 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e, + 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61, + 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f, + 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f, + 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17, + 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e, + 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7, + 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b, + 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5, + 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4, + 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a, + 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96, + 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df, + 0x20067785, 
0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46, + 0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e, + 0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62, + 0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c, + 0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93, + 0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d, + 0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680, + 0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8, + 0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071, + 0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657, + 0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b, + 0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965, + 0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b, + 0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5, + 0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69, + 0xe4fe0d44, 0x4d736b1e, 0x99b5d833, +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go new file mode 100644 index 0000000000..4b60ff3947 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go @@ -0,0 +1,369 @@ +package packfile + +import ( + "sort" + "sync" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +const ( + // deltas based on deltas, how many steps we can do. + // 50 is the default value used in JGit + maxDepth = int64(50) +) + +// applyDelta is the set of object types that we should apply deltas +var applyDelta = map[plumbing.ObjectType]bool{ + plumbing.BlobObject: true, + plumbing.TreeObject: true, +} + +type deltaSelector struct { + storer storer.EncodedObjectStorer +} + +func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { + return &deltaSelector{s} +} + +// ObjectsToPack creates a list of ObjectToPack from the hashes +// provided, creating deltas if it's suitable, using an specific +// internal logic. `packWindow` specifies the size of the sliding +// window used to compare objects for delta compression; 0 turns off +// delta compression entirely. 
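+//
+// Illustrative effect: with packWindow == 10 each object is compared
+// against at most the 10 preceding objects of the same type (sorted by
+// type, then descending size) when searching for a delta base, while
+// packWindow == 0 stores every object whole.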
+func (dw *deltaSelector) ObjectsToPack( + hashes []plumbing.Hash, + packWindow uint, +) ([]*ObjectToPack, error) { + otp, err := dw.objectsToPack(hashes, packWindow) + if err != nil { + return nil, err + } + + if packWindow == 0 { + return otp, nil + } + + dw.sort(otp) + + var objectGroups [][]*ObjectToPack + var prev *ObjectToPack + i := -1 + for _, obj := range otp { + if prev == nil || prev.Type() != obj.Type() { + objectGroups = append(objectGroups, []*ObjectToPack{obj}) + i++ + prev = obj + } else { + objectGroups[i] = append(objectGroups[i], obj) + } + } + + var wg sync.WaitGroup + var once sync.Once + for _, objs := range objectGroups { + objs := objs + wg.Add(1) + go func() { + if walkErr := dw.walk(objs, packWindow); walkErr != nil { + once.Do(func() { + err = walkErr + }) + } + wg.Done() + }() + } + wg.Wait() + + if err != nil { + return nil, err + } + + return otp, nil +} + +func (dw *deltaSelector) objectsToPack( + hashes []plumbing.Hash, + packWindow uint, +) ([]*ObjectToPack, error) { + var objectsToPack []*ObjectToPack + for _, h := range hashes { + var o plumbing.EncodedObject + var err error + if packWindow == 0 { + o, err = dw.encodedObject(h) + } else { + o, err = dw.encodedDeltaObject(h) + } + if err != nil { + return nil, err + } + + otp := newObjectToPack(o) + if _, ok := o.(plumbing.DeltaObject); ok { + otp.CleanOriginal() + } + + objectsToPack = append(objectsToPack, otp) + } + + if packWindow == 0 { + return objectsToPack, nil + } + + if err := dw.fixAndBreakChains(objectsToPack); err != nil { + return nil, err + } + + return objectsToPack, nil +} + +func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) { + edos, ok := dw.storer.(storer.DeltaObjectStorer) + if !ok { + return dw.encodedObject(h) + } + + return edos.DeltaObject(plumbing.AnyObject, h) +} + +func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) { + return dw.storer.EncodedObject(plumbing.AnyObject, h) +} + +func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error { + m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack)) + for _, otp := range objectsToPack { + m[otp.Hash()] = otp + } + + for _, otp := range objectsToPack { + if err := dw.fixAndBreakChainsOne(m, otp); err != nil { + return err + } + } + + return nil +} + +func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error { + if !otp.Object.Type().IsDelta() { + return nil + } + + // Initial ObjectToPack instances might have a delta assigned to Object + // but no actual base initially. Once Base is assigned to a delta, it means + // we already fixed it. + if otp.Base != nil { + return nil + } + + do, ok := otp.Object.(plumbing.DeltaObject) + if !ok { + // if this is not a DeltaObject, then we cannot retrieve its base, + // so we have to break the delta chain here. + return dw.undeltify(otp) + } + + base, ok := objectsToPack[do.BaseHash()] + if !ok { + // The base of the delta is not in our list of objects to pack, so + // we break the chain. 
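+ // (Breaking the chain here is assumed to be a correctness trade-off:
+ // keeping the delta would produce a thin pack, which not every
+ // consumer of the pack can resolve.)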
+ return dw.undeltify(otp) + } + + if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil { + return err + } + + otp.SetDelta(base, otp.Object) + return nil +} + +func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error { + if otp.Original != nil { + return nil + } + + if !otp.Object.Type().IsDelta() { + return nil + } + + obj, err := dw.encodedObject(otp.Hash()) + if err != nil { + return err + } + + otp.SetOriginal(obj) + + return nil +} + +// undeltify undeltifies an *ObjectToPack by retrieving the original object from +// the storer and resetting it. +func (dw *deltaSelector) undeltify(otp *ObjectToPack) error { + if err := dw.restoreOriginal(otp); err != nil { + return err + } + + otp.Object = otp.Original + otp.Depth = 0 + return nil +} + +func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { + sort.Sort(byTypeAndSize(objectsToPack)) +} + +func (dw *deltaSelector) walk( + objectsToPack []*ObjectToPack, + packWindow uint, +) error { + indexMap := make(map[plumbing.Hash]*deltaIndex) + for i := 0; i < len(objectsToPack); i++ { + // Clean up the index map and reconstructed delta objects for anything + // outside our pack window, to save memory. + if i > int(packWindow) { + obj := objectsToPack[i-int(packWindow)] + + delete(indexMap, obj.Hash()) + + if obj.IsDelta() { + obj.SaveOriginalMetadata() + obj.CleanOriginal() + } + } + + target := objectsToPack[i] + + // If we already have a delta, we don't try to find a new one for this + // object. This happens when a delta is set to be reused from an existing + // packfile. + if target.IsDelta() { + continue + } + + // We only want to create deltas from specific types. + if !applyDelta[target.Type()] { + continue + } + + for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { + base := objectsToPack[j] + // Objects must use only the same type as their delta base. + // Since objectsToPack is sorted by type and size, once we find + // a different type, we know we won't find more of them. + if base.Type() != target.Type() { + break + } + + if err := dw.tryToDeltify(indexMap, base, target); err != nil { + return err + } + } + } + + return nil +} + +func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error { + // Original object might not be present if we're reusing a delta, so we + // ensure it is restored. + if err := dw.restoreOriginal(target); err != nil { + return err + } + + if err := dw.restoreOriginal(base); err != nil { + return err + } + + // If the sizes are radically different, this is a bad pairing. + if target.Size() < base.Size()>>4 { + return nil + } + + msz := dw.deltaSizeLimit( + target.Object.Size(), + base.Depth, + target.Depth, + target.IsDelta(), + ) + + // Nearly impossible to fit useful delta. + if msz <= 8 { + return nil + } + + // If we have to insert a lot to make this work, find another. 
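+ // (Illustrative numbers: a 100 KB base at depth 0 and a 60 KB non-delta
+ // target give msz = 30 KB, so the 40 KB of extra base material fails
+ // the check below and no delta is attempted.)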
+ if base.Size()-target.Size() > msz { + return nil + } + + if _, ok := indexMap[base.Hash()]; !ok { + indexMap[base.Hash()] = new(deltaIndex) + } + + // Now we can generate the delta using originals + delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original) + if err != nil { + return err + } + + // if delta better than target + if delta.Size() < msz { + target.SetDelta(base, delta) + } + + return nil +} + +func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, + targetDepth int, targetDelta bool) int64 { + if !targetDelta { + // Any delta should be no more than 50% of the original size + // (for text files deflate of whole form should shrink 50%). + n := targetSize >> 1 + + // Evenly distribute delta size limits over allowed depth. + // If src is non-delta (depth = 0), delta <= 50% of original. + // If src is almost at limit (9/10), delta <= 10% of original. + return n * (maxDepth - int64(baseDepth)) / maxDepth + } + + // With a delta base chosen any new delta must be "better". + // Retain the distribution described above. + d := int64(targetDepth) + n := targetSize + + // If target depth is bigger than maxDepth, this delta is not suitable to be used. + if d >= maxDepth { + return 0 + } + + // If src is whole (depth=0) and base is near limit (depth=9/10) + // any delta using src can be 10x larger and still be better. + // + // If src is near limit (depth=9/10) and base is whole (depth=0) + // a new delta dependent on src must be 1/10th the size. + return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) +} + +type byTypeAndSize []*ObjectToPack + +func (a byTypeAndSize) Len() int { return len(a) } + +func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a byTypeAndSize) Less(i, j int) bool { + if a[i].Type() < a[j].Type() { + return false + } + + if a[i].Type() > a[j].Type() { + return true + } + + return a[i].Size() > a[j].Size() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go new file mode 100644 index 0000000000..1951b34ef1 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go @@ -0,0 +1,204 @@ +package packfile + +import ( + "bytes" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and +// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js +// for more info + +const ( + // Standard chunk size used to generate fingerprints + s = 16 + + // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 + // Max size of a copy operation (64KB) + maxCopySize = 64 * 1024 +) + +// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object, +// will be loaded into memory to be able to create the delta object. +// To generate target again, you will need the obtained object and "base" one. +// Error will be returned if base or target object cannot be read. 
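+//
+// A hedged sketch (base and target are plumbing.EncodedObject values of
+// the same type, e.g. two versions of a blob; error handling elided):
+//
+//	delta, _ := GetDelta(base, target)
+//	// delta.Type() == plumbing.OFSDeltaObject; applying delta to base
+//	// reconstructs target.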
+func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { + return getDelta(new(deltaIndex), base, target) +} + +func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbing.EncodedObject, err error) { + br, err := base.Reader() + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(br, &err) + + tr, err := target.Reader() + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(tr, &err) + + bb := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(bb) + bb.Reset() + + _, err = bb.ReadFrom(br) + if err != nil { + return nil, err + } + + tb := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(tb) + tb.Reset() + + _, err = tb.ReadFrom(tr) + if err != nil { + return nil, err + } + + db := diffDelta(index, bb.Bytes(), tb.Bytes()) + delta := &plumbing.MemoryObject{} + _, err = delta.Write(db) + if err != nil { + return nil, err + } + + delta.SetSize(int64(len(db))) + delta.SetType(plumbing.OFSDeltaObject) + + return delta, nil +} + +// DiffDelta returns the delta that transforms src into tgt. +func DiffDelta(src, tgt []byte) []byte { + return diffDelta(new(deltaIndex), src, tgt) +} + +func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + buf.Write(deltaEncodeSize(len(src))) + buf.Write(deltaEncodeSize(len(tgt))) + + if len(index.entries) == 0 { + index.init(src) + } + + ibuf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(ibuf) + ibuf.Reset() + for i := 0; i < len(tgt); i++ { + offset, l := index.findMatch(src, tgt, i) + + if l == 0 { + // couldn't find a match, just write the current byte and continue + ibuf.WriteByte(tgt[i]) + } else if l < 0 { + // src is less than blksz, copy the rest of the target to avoid + // calls to findMatch + for ; i < len(tgt); i++ { + ibuf.WriteByte(tgt[i]) + } + } else if l < s { + // remaining target is less than blksz, copy what's left of it + // and avoid calls to findMatch + for j := i; j < i+l; j++ { + ibuf.WriteByte(tgt[j]) + } + i += l - 1 + } else { + encodeInsertOperation(ibuf, buf) + + rl := l + aOffset := offset + for rl > 0 { + if rl < maxCopySize { + buf.Write(encodeCopyOperation(aOffset, rl)) + break + } + + buf.Write(encodeCopyOperation(aOffset, maxCopySize)) + rl -= maxCopySize + aOffset += maxCopySize + } + + i += l - 1 + } + } + + encodeInsertOperation(ibuf, buf) + + // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it. + return append([]byte{}, buf.Bytes()...) 
+} + +func encodeInsertOperation(ibuf, buf *bytes.Buffer) { + if ibuf.Len() == 0 { + return + } + + b := ibuf.Bytes() + s := ibuf.Len() + o := 0 + for { + if s <= 127 { + break + } + buf.WriteByte(byte(127)) + buf.Write(b[o : o+127]) + s -= 127 + o += 127 + } + buf.WriteByte(byte(s)) + buf.Write(b[o : o+s]) + + ibuf.Reset() +} + +func deltaEncodeSize(size int) []byte { + var ret []byte + c := size & 0x7f + size >>= 7 + for { + if size == 0 { + break + } + + ret = append(ret, byte(c|0x80)) + c = size & 0x7f + size >>= 7 + } + ret = append(ret, byte(c)) + + return ret +} + +func encodeCopyOperation(offset, length int) []byte { + code := 0x80 + var opcodes []byte + + var i uint + for i = 0; i < 4; i++ { + f := 0xff << (i * 8) + if offset&f != 0 { + opcodes = append(opcodes, byte(offset&f>>(i*8))) + code |= 0x01 << i + } + } + + for i = 0; i < 3; i++ { + f := 0xff << (i * 8) + if length&f != 0 { + opcodes = append(opcodes, byte(length&f>>(i*8))) + code |= 0x10 << i + } + } + + return append([]byte{byte(code)}, opcodes...) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go new file mode 100644 index 0000000000..2882a7f378 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go @@ -0,0 +1,39 @@ +// Package packfile implements encoding and decoding of packfile format. +// +// == pack-*.pack files have the following format: +// +// - A header appears at the beginning and consists of the following: +// +// 4-byte signature: +// The signature is: {'P', 'A', 'C', 'K'} +// +// 4-byte version number (network byte order): +// GIT currently accepts version number 2 or 3 but +// generates version 2 only. +// +// 4-byte number of objects contained in the pack (network byte order) +// +// Observation: we cannot have more than 4G versions ;-) and +// more than 4G objects in a pack. +// +// - The header is followed by number of object entries, each of +// which looks like this: +// +// (undeltified representation) +// n-byte type and length (3-bit type, (n-1)*7+4-bit length) +// compressed data +// +// (deltified representation) +// n-byte type and length (3-bit type, (n-1)*7+4-bit length) +// 20-byte base object name +// compressed delta data +// +// Observation: length of each object is encoded in a variable +// length format and is not constrained to 32-bit or anything. +// +// - The trailer records 20-byte SHA1 checksum of all of the above. +// +// +// Source: +// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt +package packfile diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go new file mode 100644 index 0000000000..5501f8861c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go @@ -0,0 +1,225 @@ +package packfile + +import ( + "compress/zlib" + "crypto/sha1" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/binary" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// Encoder gets the data from the storage and write it into the writer in PACK +// format +type Encoder struct { + selector *deltaSelector + w *offsetWriter + zw *zlib.Writer + hasher plumbing.Hasher + + useRefDeltas bool +} + +// NewEncoder creates a new packfile encoder using a specific Writer and +// EncodedObjectStorer. 
By default deltas used to generate the packfile will be +// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. +func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { + h := plumbing.Hasher{ + Hash: sha1.New(), + } + mw := io.MultiWriter(w, h) + ow := newOffsetWriter(mw) + zw := zlib.NewWriter(mw) + return &Encoder{ + selector: newDeltaSelector(s), + w: ow, + zw: zw, + hasher: h, + useRefDeltas: useRefDeltas, + } +} + +// Encode creates a packfile containing all the objects referenced in +// hashes and writes it to the writer in the Encoder. `packWindow` +// specifies the size of the sliding window used to compare objects +// for delta compression; 0 turns off delta compression entirely. +func (e *Encoder) Encode( + hashes []plumbing.Hash, + packWindow uint, +) (plumbing.Hash, error) { + objects, err := e.selector.ObjectsToPack(hashes, packWindow) + if err != nil { + return plumbing.ZeroHash, err + } + + return e.encode(objects) +} + +func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { + if err := e.head(len(objects)); err != nil { + return plumbing.ZeroHash, err + } + + for _, o := range objects { + if err := e.entry(o); err != nil { + return plumbing.ZeroHash, err + } + } + + return e.footer() +} + +func (e *Encoder) head(numEntries int) error { + return binary.Write( + e.w, + signature, + int32(VersionSupported), + int32(numEntries), + ) +} + +func (e *Encoder) entry(o *ObjectToPack) (err error) { + if o.WantWrite() { + // A cycle exists in this delta chain. This should only occur if a + // selected object representation disappeared during writing + // (for example due to a concurrent repack) and a different base + // was chosen, forcing a cycle. Select something other than a + // delta, and write this object. + e.selector.restoreOriginal(o) + o.BackToOriginal() + } + + if o.IsWritten() { + return nil + } + + o.MarkWantWrite() + + if err := e.writeBaseIfDelta(o); err != nil { + return err + } + + // We need to check if we already write that object due a cyclic delta chain + if o.IsWritten() { + return nil + } + + o.Offset = e.w.Offset() + + if o.IsDelta() { + if err := e.writeDeltaHeader(o); err != nil { + return err + } + } else { + if err := e.entryHead(o.Type(), o.Size()); err != nil { + return err + } + } + + e.zw.Reset(e.w) + + defer ioutil.CheckClose(e.zw, &err) + + or, err := o.Object.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(or, &err) + + _, err = io.Copy(e.zw, or) + if err != nil { + return err + } + + return nil +} + +func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { + if o.IsDelta() && !o.Base.IsWritten() { + // We must write base first + return e.entry(o.Base) + } + + return nil +} + +func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error { + // Write offset deltas by default + t := plumbing.OFSDeltaObject + if e.useRefDeltas { + t = plumbing.REFDeltaObject + } + + if err := e.entryHead(t, o.Object.Size()); err != nil { + return err + } + + if e.useRefDeltas { + return e.writeRefDeltaHeader(o.Base.Hash()) + } else { + return e.writeOfsDeltaHeader(o) + } +} + +func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { + return binary.Write(e.w, base) +} + +func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error { + // for OFS_DELTA, offset of the base is interpreted as negative offset + // relative to the type-byte of the header of the ofs-delta entry. 
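+ // (Example, illustrative: a base written at pack offset 100 and this
+ // entry at offset 150 store the variable-width value 50; a reader
+ // subtracts it from the entry offset to find the base.)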
+ relativeOffset := o.Offset - o.Base.Offset + if relativeOffset <= 0 { + return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) + } + + return binary.WriteVariableWidthInt(e.w, relativeOffset) +} + +func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { + t := int64(typeNum) + header := []byte{} + c := (t << firstLengthBits) | (size & maskFirstLength) + size >>= firstLengthBits + for { + if size == 0 { + break + } + header = append(header, byte(c|maskContinue)) + c = size & int64(maskLength) + size >>= lengthBits + } + + header = append(header, byte(c)) + _, err := e.w.Write(header) + + return err +} + +func (e *Encoder) footer() (plumbing.Hash, error) { + h := e.hasher.Sum() + return h, binary.Write(e.w, h) +} + +type offsetWriter struct { + w io.Writer + offset int64 +} + +func newOffsetWriter(w io.Writer) *offsetWriter { + return &offsetWriter{w: w} +} + +func (ow *offsetWriter) Write(p []byte) (n int, err error) { + n, err = ow.w.Write(p) + ow.offset += int64(n) + return n, err +} + +func (ow *offsetWriter) Offset() int64 { + return ow.offset +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go new file mode 100644 index 0000000000..c0b9163313 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go @@ -0,0 +1,30 @@ +package packfile + +import "fmt" + +// Error specifies errors returned during packfile parsing. +type Error struct { + reason, details string +} + +// NewError returns a new error. +func NewError(reason string) *Error { + return &Error{reason: reason} +} + +// Error returns a text representation of the error. +func (e *Error) Error() string { + if e.details == "" { + return e.reason + } + + return fmt.Sprintf("%s: %s", e.reason, e.details) +} + +// AddDetails adds details to an error, with additional text. +func (e *Error) AddDetails(format string, args ...interface{}) *Error { + return &Error{ + reason: e.reason, + details: fmt.Sprintf(format, args...), + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go new file mode 100644 index 0000000000..c5edaf52ee --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go @@ -0,0 +1,116 @@ +package packfile + +import ( + "io" + + billy "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/idxfile" +) + +// FSObject is an object from the packfile on the filesystem. +type FSObject struct { + hash plumbing.Hash + h *ObjectHeader + offset int64 + size int64 + typ plumbing.ObjectType + index idxfile.Index + fs billy.Filesystem + path string + cache cache.Object +} + +// NewFSObject creates a new filesystem object. +func NewFSObject( + hash plumbing.Hash, + finalType plumbing.ObjectType, + offset int64, + contentSize int64, + index idxfile.Index, + fs billy.Filesystem, + path string, + cache cache.Object, +) *FSObject { + return &FSObject{ + hash: hash, + offset: offset, + size: contentSize, + typ: finalType, + index: index, + fs: fs, + path: path, + cache: cache, + } +} + +// Reader implements the plumbing.EncodedObject interface. 
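+//
+// A hedged sketch of the lookup implemented below: a cached representation
+// is served directly; otherwise the packfile at o.path is opened and the
+// entry at o.offset is inflated (fsObj is a hypothetical *FSObject):
+//
+//	rc, _ := fsObj.Reader() // io.ReadCloser over the object content
+//	defer rc.Close()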
+func (o *FSObject) Reader() (io.ReadCloser, error) { + obj, ok := o.cache.Get(o.hash) + if ok && obj != o { + reader, err := obj.Reader() + if err != nil { + return nil, err + } + + return reader, nil + } + + f, err := o.fs.Open(o.path) + if err != nil { + return nil, err + } + + p := NewPackfileWithCache(o.index, nil, f, o.cache) + r, err := p.getObjectContent(o.offset) + if err != nil { + _ = f.Close() + return nil, err + } + + if err := f.Close(); err != nil { + return nil, err + } + + return r, nil +} + +// SetSize implements the plumbing.EncodedObject interface. This method +// is a noop. +func (o *FSObject) SetSize(int64) {} + +// SetType implements the plumbing.EncodedObject interface. This method is +// a noop. +func (o *FSObject) SetType(plumbing.ObjectType) {} + +// Hash implements the plumbing.EncodedObject interface. +func (o *FSObject) Hash() plumbing.Hash { return o.hash } + +// Size implements the plumbing.EncodedObject interface. +func (o *FSObject) Size() int64 { return o.size } + +// Type implements the plumbing.EncodedObject interface. +func (o *FSObject) Type() plumbing.ObjectType { + return o.typ +} + +// Writer implements the plumbing.EncodedObject interface. This method always +// returns a nil writer. +func (o *FSObject) Writer() (io.WriteCloser, error) { + return nil, nil +} + +type objectReader struct { + io.ReadCloser + f billy.File +} + +func (r *objectReader) Close() error { + if err := r.ReadCloser.Close(); err != nil { + _ = r.f.Close() + return err + } + + return r.f.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go new file mode 100644 index 0000000000..8ce29ef8ba --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go @@ -0,0 +1,164 @@ +package packfile + +import ( + "github.com/go-git/go-git/v5/plumbing" +) + +// ObjectToPack is a representation of an object that is going to be into a +// pack file. +type ObjectToPack struct { + // The main object to pack, it could be any object, including deltas + Object plumbing.EncodedObject + // Base is the object that a delta is based on (it could be also another delta). + // If the main object is not a delta, Base will be null + Base *ObjectToPack + // Original is the object that we can generate applying the delta to + // Base, or the same object as Object in the case of a non-delta + // object. + Original plumbing.EncodedObject + // Depth is the amount of deltas needed to resolve to obtain Original + // (delta based on delta based on ...) 
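+ // For example (illustrative), Depth == 2 means Object is a delta whose
+ // base is itself a delta on top of a full object.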
+ Depth int + + // offset in pack when object has been already written, or 0 if it + // has not been written yet + Offset int64 + + // Information from the original object + resolvedOriginal bool + originalType plumbing.ObjectType + originalSize int64 + originalHash plumbing.Hash +} + +// newObjectToPack creates a correct ObjectToPack based on a non-delta object +func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { + return &ObjectToPack{ + Object: o, + Original: o, + } +} + +// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on +// his base (could be another delta), the delta target (in this case called original), +// and the delta Object itself +func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { + return &ObjectToPack{ + Object: delta, + Base: base, + Original: original, + Depth: base.Depth + 1, + } +} + +// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one +func (o *ObjectToPack) BackToOriginal() { + if o.IsDelta() && o.Original != nil { + o.Object = o.Original + o.Base = nil + o.Depth = 0 + } +} + +// IsWritten returns if that ObjectToPack was +// already written into the packfile or not +func (o *ObjectToPack) IsWritten() bool { + return o.Offset > 1 +} + +// MarkWantWrite marks this ObjectToPack as WantWrite +// to avoid delta chain loops +func (o *ObjectToPack) MarkWantWrite() { + o.Offset = 1 +} + +// WantWrite checks if this ObjectToPack was marked as WantWrite before +func (o *ObjectToPack) WantWrite() bool { + return o.Offset == 1 +} + +// SetOriginal sets both Original and saves size, type and hash. If object +// is nil Original is set but previous resolved values are kept +func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) { + o.Original = obj + o.SaveOriginalMetadata() +} + +// SaveOriginalMetadata saves size, type and hash of Original object +func (o *ObjectToPack) SaveOriginalMetadata() { + if o.Original != nil { + o.originalSize = o.Original.Size() + o.originalType = o.Original.Type() + o.originalHash = o.Original.Hash() + o.resolvedOriginal = true + } +} + +// CleanOriginal sets Original to nil +func (o *ObjectToPack) CleanOriginal() { + o.Original = nil +} + +func (o *ObjectToPack) Type() plumbing.ObjectType { + if o.Original != nil { + return o.Original.Type() + } + + if o.resolvedOriginal { + return o.originalType + } + + if o.Base != nil { + return o.Base.Type() + } + + if o.Object != nil { + return o.Object.Type() + } + + panic("cannot get type") +} + +func (o *ObjectToPack) Hash() plumbing.Hash { + if o.Original != nil { + return o.Original.Hash() + } + + if o.resolvedOriginal { + return o.originalHash + } + + do, ok := o.Object.(plumbing.DeltaObject) + if ok { + return do.ActualHash() + } + + panic("cannot get hash") +} + +func (o *ObjectToPack) Size() int64 { + if o.Original != nil { + return o.Original.Size() + } + + if o.resolvedOriginal { + return o.originalSize + } + + do, ok := o.Object.(plumbing.DeltaObject) + if ok { + return do.ActualSize() + } + + panic("cannot get ObjectToPack size") +} + +func (o *ObjectToPack) IsDelta() bool { + return o.Base != nil +} + +func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { + o.Object = delta + o.Base = base + o.Depth = base.Depth + 1 +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go new file mode 100644 index 0000000000..ddd7f62fce --- /dev/null 
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go @@ -0,0 +1,565 @@ +package packfile + +import ( + "bytes" + "io" + "os" + + billy "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/idxfile" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var ( + // ErrInvalidObject is returned by Decode when an invalid object is + // found in the packfile. + ErrInvalidObject = NewError("invalid git object") + // ErrZLib is returned by Decode when there was an error unzipping + // the packfile contents. + ErrZLib = NewError("zlib reading error") +) + +// When reading small objects from packfile it is beneficial to do so at +// once to exploit the buffered I/O. In many cases the objects are so small +// that they were already loaded to memory when the object header was +// loaded from the packfile. Wrapping in FSObject would cause this buffered +// data to be thrown away and then re-read later, with the additional +// seeking causing reloads from disk. Objects smaller than this threshold +// are now always read into memory and stored in cache instead of being +// wrapped in FSObject. +const smallObjectThreshold = 16 * 1024 + +// Packfile allows retrieving information from inside a packfile. +type Packfile struct { + idxfile.Index + fs billy.Filesystem + file billy.File + s *Scanner + deltaBaseCache cache.Object + offsetToType map[int64]plumbing.ObjectType +} + +// NewPackfileWithCache creates a new Packfile with the given object cache. +// If the filesystem is provided, the packfile will return FSObjects, otherwise +// it will return MemoryObjects. +func NewPackfileWithCache( + index idxfile.Index, + fs billy.Filesystem, + file billy.File, + cache cache.Object, +) *Packfile { + s := NewScanner(file) + return &Packfile{ + index, + fs, + file, + s, + cache, + make(map[int64]plumbing.ObjectType), + } +} + +// NewPackfile returns a packfile representation for the given packfile file +// and packfile idx. +// If the filesystem is provided, the packfile will return FSObjects, otherwise +// it will return MemoryObjects. +func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { + return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) +} + +// Get retrieves the encoded object in the packfile with the given hash. +func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) { + offset, err := p.FindOffset(h) + if err != nil { + return nil, err + } + + return p.objectAtOffset(offset, h) +} + +// GetByOffset retrieves the encoded object from the packfile at the given +// offset. +func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { + hash, err := p.FindHash(o) + if err != nil { + return nil, err + } + + return p.objectAtOffset(o, hash) +} + +// GetSizeByOffset retrieves the size of the encoded object from the +// packfile with the given offset. 
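+// Illustrative sketch (editor's addition, not part of the vendored source):
+// typical use of Packfile, assuming a parsed idxfile.Index and an open pack
+// file on a billy.Filesystem. Variable names are hypothetical.
+//
+//	p := packfile.NewPackfile(idx, fs, packFile)
+//	defer p.Close()
+//	// h is a plumbing.Hash obtained elsewhere (e.g. from a reference).
+//	obj, err := p.Get(h)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(obj.Type(), obj.Size())
+//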
+func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { + if _, err := p.s.SeekFromStart(o); err != nil { + if err == io.EOF || isInvalid(err) { + return 0, plumbing.ErrObjectNotFound + } + + return 0, err + } + + h, err := p.nextObjectHeader() + if err != nil { + return 0, err + } + return p.getObjectSize(h) +} + +func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) { + h, err := p.s.SeekObjectHeader(offset) + p.s.pendingObject = nil + return h, err +} + +func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { + h, err := p.s.NextObjectHeader() + p.s.pendingObject = nil + return h, err +} + +func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 { + delta := buf.Bytes() + _, delta = decodeLEB128(delta) // skip src size + sz, _ := decodeLEB128(delta) + return int64(sz) +} + +func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + return h.Length, nil + case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + + if _, _, err := p.s.NextObject(buf); err != nil { + return 0, err + } + + return p.getDeltaObjectSize(buf), nil + default: + return 0, ErrInvalidObject.AddDetails("type %q", h.Type) + } +} + +func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) { + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + return h.Type, nil + case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: + var offset int64 + if h.Type == plumbing.REFDeltaObject { + offset, err = p.FindOffset(h.Reference) + if err != nil { + return + } + } else { + offset = h.OffsetReference + } + + if baseType, ok := p.offsetToType[offset]; ok { + typ = baseType + } else { + h, err = p.objectHeaderAtOffset(offset) + if err != nil { + return + } + + typ, err = p.getObjectType(h) + if err != nil { + return + } + } + default: + err = ErrInvalidObject.AddDetails("type %q", h.Type) + } + + p.offsetToType[h.Offset] = typ + + return +} + +func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { + if obj, ok := p.cacheGet(hash); ok { + return obj, nil + } + + h, err := p.objectHeaderAtOffset(offset) + if err != nil { + if err == io.EOF || isInvalid(err) { + return nil, plumbing.ErrObjectNotFound + } + return nil, err + } + + return p.getNextObject(h, hash) +} + +func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) { + var err error + + // If we have no filesystem, we will return a MemoryObject instead + // of an FSObject. + if p.fs == nil { + return p.getNextMemoryObject(h) + } + + // If the object is small enough then read it completely into memory now since + // it is already read from disk into buffer anyway. For delta objects we want + // to perform the optimization too, but we have to be careful about applying + // small deltas on big objects. + var size int64 + if h.Length <= smallObjectThreshold { + if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { + return p.getNextMemoryObject(h) + } + + // For delta objects we read the delta data and apply the small object + // optimization only if the expanded version of the object still meets + // the small object threshold condition. 
+ buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + if _, _, err := p.s.NextObject(buf); err != nil { + return nil, err + } + + size = p.getDeltaObjectSize(buf) + if size <= smallObjectThreshold { + var obj = new(plumbing.MemoryObject) + obj.SetSize(size) + if h.Type == plumbing.REFDeltaObject { + err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf) + } else { + err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf) + } + return obj, err + } + } else { + size, err = p.getObjectSize(h) + if err != nil { + return nil, err + } + } + + typ, err := p.getObjectType(h) + if err != nil { + return nil, err + } + + p.offsetToType[h.Offset] = typ + + return NewFSObject( + hash, + typ, + h.Offset, + size, + p.Index, + p.fs, + p.file.Name(), + p.deltaBaseCache, + ), nil +} + +func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { + h, err := p.objectHeaderAtOffset(offset) + if err != nil { + return nil, err + } + + // getObjectContent is called from FSObject, so we have to explicitly + // get memory object here to avoid recursive cycle + obj, err := p.getNextMemoryObject(h) + if err != nil { + return nil, err + } + + return obj.Reader() +} + +func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) { + var obj = new(plumbing.MemoryObject) + obj.SetSize(h.Length) + obj.SetType(h.Type) + + var err error + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + err = p.fillRegularObjectContent(obj) + case plumbing.REFDeltaObject: + err = p.fillREFDeltaObjectContent(obj, h.Reference) + case plumbing.OFSDeltaObject: + err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference) + default: + err = ErrInvalidObject.AddDetails("type %q", h.Type) + } + + if err != nil { + return nil, err + } + + p.offsetToType[h.Offset] = obj.Type() + + return obj, nil +} + +func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err error) { + w, err := obj.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + _, _, err = p.s.NextObject(w) + p.cachePut(obj) + + return err +} + +func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, _, err := p.s.NextObject(buf) + if err != nil { + return err + } + + return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf) +} + +func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error { + var err error + + base, ok := p.cacheGet(ref) + if !ok { + base, err = p.Get(ref) + if err != nil { + return err + } + } + + obj.SetType(base.Type()) + err = ApplyDelta(obj, base, buf.Bytes()) + p.cachePut(obj) + + return err +} + +func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, _, err := p.s.NextObject(buf) + if err != nil { + return err + } + + return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf) +} + +func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error { + hash, err := p.FindHash(offset) + if err != nil { + return err + } + + base, err := p.objectAtOffset(offset, hash) + if err != nil { + return err + } + + obj.SetType(base.Type()) + err = ApplyDelta(obj, base, buf.Bytes()) + p.cachePut(obj) + + 
return err +} + +func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) { + if p.deltaBaseCache == nil { + return nil, false + } + + return p.deltaBaseCache.Get(h) +} + +func (p *Packfile) cachePut(obj plumbing.EncodedObject) { + if p.deltaBaseCache == nil { + return + } + + p.deltaBaseCache.Put(obj) +} + +// GetAll returns an iterator with all encoded objects in the packfile. +// The iterator returned is not thread-safe, it should be used in the same +// thread as the Packfile instance. +func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) { + return p.GetByType(plumbing.AnyObject) +} + +// GetByType returns all the objects of the given type. +func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) { + switch typ { + case plumbing.AnyObject, + plumbing.BlobObject, + plumbing.TreeObject, + plumbing.CommitObject, + plumbing.TagObject: + entries, err := p.EntriesByOffset() + if err != nil { + return nil, err + } + + return &objectIter{ + // Easiest way to provide an object decoder is just to pass a Packfile + // instance. To not mess with the seeks, it's a new instance with a + // different scanner but the same cache and offset to hash map for + // reusing as much cache as possible. + p: p, + iter: entries, + typ: typ, + }, nil + default: + return nil, plumbing.ErrInvalidType + } +} + +// ID returns the ID of the packfile, which is the checksum at the end of it. +func (p *Packfile) ID() (plumbing.Hash, error) { + prev, err := p.file.Seek(-20, io.SeekEnd) + if err != nil { + return plumbing.ZeroHash, err + } + + var hash plumbing.Hash + if _, err := io.ReadFull(p.file, hash[:]); err != nil { + return plumbing.ZeroHash, err + } + + if _, err := p.file.Seek(prev, io.SeekStart); err != nil { + return plumbing.ZeroHash, err + } + + return hash, nil +} + +// Scanner returns the packfile's Scanner +func (p *Packfile) Scanner() *Scanner { + return p.s +} + +// Close the packfile and its resources. 
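+// Illustrative sketch (editor's addition, not part of the vendored source):
+// iterating every commit object in the pack via GetByType.
+//
+//	iter, err := p.GetByType(plumbing.CommitObject)
+//	if err != nil {
+//		return err
+//	}
+//	defer iter.Close()
+//	err = iter.ForEach(func(obj plumbing.EncodedObject) error {
+//		fmt.Println(obj.Hash())
+//		return nil
+//	})
+//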
+func (p *Packfile) Close() error { + closer, ok := p.file.(io.Closer) + if !ok { + return nil + } + + return closer.Close() +} + +type objectIter struct { + p *Packfile + typ plumbing.ObjectType + iter idxfile.EntryIter +} + +func (i *objectIter) Next() (plumbing.EncodedObject, error) { + for { + e, err := i.iter.Next() + if err != nil { + return nil, err + } + + if i.typ != plumbing.AnyObject { + if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok { + if typ != i.typ { + continue + } + } else if obj, ok := i.p.cacheGet(e.Hash); ok { + if obj.Type() != i.typ { + i.p.offsetToType[int64(e.Offset)] = obj.Type() + continue + } + return obj, nil + } else { + h, err := i.p.objectHeaderAtOffset(int64(e.Offset)) + if err != nil { + return nil, err + } + + if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject { + typ, err := i.p.getObjectType(h) + if err != nil { + return nil, err + } + if typ != i.typ { + i.p.offsetToType[int64(e.Offset)] = typ + continue + } + // getObjectType will seek in the file so we cannot use getNextObject safely + return i.p.objectAtOffset(int64(e.Offset), e.Hash) + } else { + if h.Type != i.typ { + i.p.offsetToType[int64(e.Offset)] = h.Type + continue + } + return i.p.getNextObject(h, e.Hash) + } + } + } + + obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash) + if err != nil { + return nil, err + } + + return obj, nil + } +} + +func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error { + for { + o, err := i.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := f(o); err != nil { + return err + } + } +} + +func (i *objectIter) Close() { + i.iter.Close() +} + +// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid +// error inside. It also checks for the windows error, which is different from +// os.ErrInvalid. +func isInvalid(err error) bool { + pe, ok := err.(*os.PathError) + if !ok { + return false + } + + errstr := pe.Err.Error() + return errstr == errInvalidUnix || errstr == errInvalidWindows +} + +// errInvalidWindows is the Windows equivalent to os.ErrInvalid +const errInvalidWindows = "The parameter is incorrect." + +var errInvalidUnix = os.ErrInvalid.Error() diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go new file mode 100644 index 0000000000..4b5a5708cc --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go @@ -0,0 +1,495 @@ +package packfile + +import ( + "bytes" + "errors" + "io" + stdioutil "io/ioutil" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var ( + // ErrReferenceDeltaNotFound is returned when the reference delta is not + // found. + ErrReferenceDeltaNotFound = errors.New("reference delta not found") + + // ErrNotSeekableSource is returned when the source for the parser is not + // seekable and a storage was not provided, so it can't be parsed. + ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided") + + // ErrDeltaNotCached is returned when the delta could not be found in cache. + ErrDeltaNotCached = errors.New("delta could not be found in cache") +) + +// Observer interface is implemented by index encoders. +type Observer interface { + // OnHeader is called when a new packfile is opened. 
+ OnHeader(count uint32) error + // OnInflatedObjectHeader is called for each object header read. + OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error + // OnInflatedObjectContent is called for each decoded object. + OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error + // OnFooter is called when decoding is done. + OnFooter(h plumbing.Hash) error +} + +// Parser decodes a packfile and calls any observer associated to it. Is used +// to generate indexes. +type Parser struct { + storage storer.EncodedObjectStorer + scanner *Scanner + count uint32 + oi []*objectInfo + oiByHash map[plumbing.Hash]*objectInfo + oiByOffset map[int64]*objectInfo + hashOffset map[plumbing.Hash]int64 + checksum plumbing.Hash + + cache *cache.BufferLRU + // delta content by offset, only used if source is not seekable + deltas map[int64][]byte + + ob []Observer +} + +// NewParser creates a new Parser. The Scanner source must be seekable. +// If it's not, NewParserWithStorage should be used instead. +func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) { + return NewParserWithStorage(scanner, nil, ob...) +} + +// NewParserWithStorage creates a new Parser. The scanner source must either +// be seekable or a storage must be provided. +func NewParserWithStorage( + scanner *Scanner, + storage storer.EncodedObjectStorer, + ob ...Observer, +) (*Parser, error) { + if !scanner.IsSeekable && storage == nil { + return nil, ErrNotSeekableSource + } + + var deltas map[int64][]byte + if !scanner.IsSeekable { + deltas = make(map[int64][]byte) + } + + return &Parser{ + storage: storage, + scanner: scanner, + ob: ob, + count: 0, + cache: cache.NewBufferLRUDefault(), + deltas: deltas, + }, nil +} + +func (p *Parser) forEachObserver(f func(o Observer) error) error { + for _, o := range p.ob { + if err := f(o); err != nil { + return err + } + } + return nil +} + +func (p *Parser) onHeader(count uint32) error { + return p.forEachObserver(func(o Observer) error { + return o.OnHeader(count) + }) +} + +func (p *Parser) onInflatedObjectHeader( + t plumbing.ObjectType, + objSize int64, + pos int64, +) error { + return p.forEachObserver(func(o Observer) error { + return o.OnInflatedObjectHeader(t, objSize, pos) + }) +} + +func (p *Parser) onInflatedObjectContent( + h plumbing.Hash, + pos int64, + crc uint32, + content []byte, +) error { + return p.forEachObserver(func(o Observer) error { + return o.OnInflatedObjectContent(h, pos, crc, content) + }) +} + +func (p *Parser) onFooter(h plumbing.Hash) error { + return p.forEachObserver(func(o Observer) error { + return o.OnFooter(h) + }) +} + +// Parse start decoding phase of the packfile. 
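+// Illustrative sketch (editor's addition, not part of the vendored source):
+// indexing a packfile stream with the Parser. It assumes, as elsewhere in
+// go-git, that idxfile.Writer implements Observer and accumulates the index
+// entries emitted during parsing.
+//
+//	w := new(idxfile.Writer)
+//	parser, err := packfile.NewParser(packfile.NewScanner(r), w)
+//	if err != nil {
+//		return err
+//	}
+//	checksum, err := parser.Parse()
+//	if err != nil {
+//		return err
+//	}
+//	idx, err := w.Index() // the generated index, named checksum for the pack
+//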
+func (p *Parser) Parse() (plumbing.Hash, error) { + if err := p.init(); err != nil { + return plumbing.ZeroHash, err + } + + if err := p.indexObjects(); err != nil { + return plumbing.ZeroHash, err + } + + var err error + p.checksum, err = p.scanner.Checksum() + if err != nil && err != io.EOF { + return plumbing.ZeroHash, err + } + + if err := p.resolveDeltas(); err != nil { + return plumbing.ZeroHash, err + } + + if err := p.onFooter(p.checksum); err != nil { + return plumbing.ZeroHash, err + } + + return p.checksum, nil +} + +func (p *Parser) init() error { + _, c, err := p.scanner.Header() + if err != nil { + return err + } + + if err := p.onHeader(c); err != nil { + return err + } + + p.count = c + p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count) + p.oiByOffset = make(map[int64]*objectInfo, p.count) + p.oi = make([]*objectInfo, p.count) + + return nil +} + +func (p *Parser) indexObjects() error { + buf := new(bytes.Buffer) + + for i := uint32(0); i < p.count; i++ { + buf.Reset() + + oh, err := p.scanner.NextObjectHeader() + if err != nil { + return err + } + + delta := false + var ota *objectInfo + switch t := oh.Type; t { + case plumbing.OFSDeltaObject: + delta = true + + parent, ok := p.oiByOffset[oh.OffsetReference] + if !ok { + return plumbing.ErrObjectNotFound + } + + ota = newDeltaObject(oh.Offset, oh.Length, t, parent) + parent.Children = append(parent.Children, ota) + case plumbing.REFDeltaObject: + delta = true + parent, ok := p.oiByHash[oh.Reference] + if !ok { + // can't find referenced object in this pack file + // this must be a "thin" pack. + parent = &objectInfo{ //Placeholder parent + SHA1: oh.Reference, + ExternalRef: true, // mark as an external reference that must be resolved + Type: plumbing.AnyObject, + DiskType: plumbing.AnyObject, + } + p.oiByHash[oh.Reference] = parent + } + ota = newDeltaObject(oh.Offset, oh.Length, t, parent) + parent.Children = append(parent.Children, ota) + + default: + ota = newBaseObject(oh.Offset, oh.Length, t) + } + + _, crc, err := p.scanner.NextObject(buf) + if err != nil { + return err + } + + ota.Crc32 = crc + ota.Length = oh.Length + + data := buf.Bytes() + if !delta { + sha1, err := getSHA1(ota.Type, data) + if err != nil { + return err + } + + ota.SHA1 = sha1 + p.oiByHash[ota.SHA1] = ota + } + + if p.storage != nil && !delta { + obj := new(plumbing.MemoryObject) + obj.SetSize(oh.Length) + obj.SetType(oh.Type) + if _, err := obj.Write(data); err != nil { + return err + } + + if _, err := p.storage.SetEncodedObject(obj); err != nil { + return err + } + } + + if delta && !p.scanner.IsSeekable { + p.deltas[oh.Offset] = make([]byte, len(data)) + copy(p.deltas[oh.Offset], data) + } + + p.oiByOffset[oh.Offset] = ota + p.oi[i] = ota + } + + return nil +} + +func (p *Parser) resolveDeltas() error { + buf := &bytes.Buffer{} + for _, obj := range p.oi { + buf.Reset() + err := p.get(obj, buf) + if err != nil { + return err + } + content := buf.Bytes() + + if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { + return err + } + + if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { + return err + } + + if !obj.IsDelta() && len(obj.Children) > 0 { + for _, child := range obj.Children { + if err := p.resolveObject(stdioutil.Discard, child, content); err != nil { + return err + } + } + + // Remove the delta from the cache. 
+ if obj.DiskType.IsDelta() && !p.scanner.IsSeekable { + delete(p.deltas, obj.Offset) + } + } + } + + return nil +} + +func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { + if !o.ExternalRef { // skip cache check for placeholder parents + b, ok := p.cache.Get(o.Offset) + if ok { + _, err := buf.Write(b) + return err + } + } + + // If it's not on the cache and is not a delta we can try to find it in the + // storage, if there's one. External refs must enter here. + if p.storage != nil && !o.Type.IsDelta() { + var e plumbing.EncodedObject + e, err = p.storage.EncodedObject(plumbing.AnyObject, o.SHA1) + if err != nil { + return err + } + o.Type = e.Type() + + var r io.ReadCloser + r, err = e.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + _, err = buf.ReadFrom(io.LimitReader(r, e.Size())) + return err + } + + if o.ExternalRef { + // we were not able to resolve a ref in a thin pack + return ErrReferenceDeltaNotFound + } + + if o.DiskType.IsDelta() { + b := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(b) + b.Reset() + err := p.get(o.Parent, b) + if err != nil { + return err + } + base := b.Bytes() + + err = p.resolveObject(buf, o, base) + if err != nil { + return err + } + } else { + err := p.readData(buf, o) + if err != nil { + return err + } + } + + if len(o.Children) > 0 { + data := make([]byte, buf.Len()) + copy(data, buf.Bytes()) + p.cache.Put(o.Offset, data) + } + return nil +} + +func (p *Parser) resolveObject( + w io.Writer, + o *objectInfo, + base []byte, +) error { + if !o.DiskType.IsDelta() { + return nil + } + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + err := p.readData(buf, o) + if err != nil { + return err + } + data := buf.Bytes() + + data, err = applyPatchBase(o, data, base) + if err != nil { + return err + } + + if p.storage != nil { + obj := new(plumbing.MemoryObject) + obj.SetSize(o.Size()) + obj.SetType(o.Type) + if _, err := obj.Write(data); err != nil { + return err + } + + if _, err := p.storage.SetEncodedObject(obj); err != nil { + return err + } + } + _, err = w.Write(data) + return err +} + +func (p *Parser) readData(w io.Writer, o *objectInfo) error { + if !p.scanner.IsSeekable && o.DiskType.IsDelta() { + data, ok := p.deltas[o.Offset] + if !ok { + return ErrDeltaNotCached + } + _, err := w.Write(data) + return err + } + + if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { + return err + } + + if _, _, err := p.scanner.NextObject(w); err != nil { + return err + } + return nil +} + +func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { + patched, err := PatchDelta(base, data) + if err != nil { + return nil, err + } + + if ota.SHA1 == plumbing.ZeroHash { + ota.Type = ota.Parent.Type + sha1, err := getSHA1(ota.Type, patched) + if err != nil { + return nil, err + } + + ota.SHA1 = sha1 + ota.Length = int64(len(patched)) + } + + return patched, nil +} + +func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { + hasher := plumbing.NewHasher(t, int64(len(data))) + if _, err := hasher.Write(data); err != nil { + return plumbing.ZeroHash, err + } + + return hasher.Sum(), nil +} + +type objectInfo struct { + Offset int64 + Length int64 + Type plumbing.ObjectType + DiskType plumbing.ObjectType + ExternalRef bool // indicates this is an external reference in a thin pack file + + Crc32 uint32 + + Parent *objectInfo + Children []*objectInfo + SHA1 plumbing.Hash +} + +func newBaseObject(offset, length int64, t plumbing.ObjectType) 
*objectInfo { + return newDeltaObject(offset, length, t, nil) +} + +func newDeltaObject( + offset, length int64, + t plumbing.ObjectType, + parent *objectInfo, +) *objectInfo { + obj := &objectInfo{ + Offset: offset, + Length: length, + Type: t, + DiskType: t, + Crc32: 0, + Parent: parent, + } + + return obj +} + +func (o *objectInfo) IsDelta() bool { + return o.Type.IsDelta() +} + +func (o *objectInfo) Size() int64 { + return o.Length +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go new file mode 100644 index 0000000000..9e90f30a72 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go @@ -0,0 +1,252 @@ +package packfile + +import ( + "bytes" + "errors" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h +// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, +// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js +// for details about the delta format. + +const deltaSizeMin = 4 + +// ApplyDelta writes to target the result of applying the modification deltas in delta to base. +func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { + r, err := base.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + w, err := target.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, err = buf.ReadFrom(r) + if err != nil { + return err + } + src := buf.Bytes() + + dst := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(dst) + dst.Reset() + err = patchDelta(dst, src, delta) + if err != nil { + return err + } + + target.SetSize(int64(dst.Len())) + + b := byteSlicePool.Get().([]byte) + _, err = io.CopyBuffer(w, dst, b) + byteSlicePool.Put(b) + return err +} + +var ( + ErrInvalidDelta = errors.New("invalid delta") + ErrDeltaCmd = errors.New("wrong delta command") +) + +// PatchDelta returns the result of applying the modification deltas in delta to src. +// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command +// is not copy from source or copy from delta (ErrDeltaCmd). 
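+//
+// Worked example (editor's addition, not part of the vendored source): the
+// delta below copies the whole 5-byte source, then inserts " world" from the
+// delta stream, turning "hello" into "hello world".
+//
+//	src := []byte("hello")
+//	delta := []byte{
+//		0x05,       // source size: 5 (LEB128)
+//		0x0b,       // target size: 11 (LEB128)
+//		0x90, 0x05, // copy-from-src: offset 0, size 5 ("hello")
+//		0x06,       // copy-from-delta: insert the next 6 bytes
+//		' ', 'w', 'o', 'r', 'l', 'd',
+//	}
+//	out, err := packfile.PatchDelta(src, delta) // out == []byte("hello world")
+//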
+func PatchDelta(src, delta []byte) ([]byte, error) { + b := &bytes.Buffer{} + if err := patchDelta(b, src, delta); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func patchDelta(dst *bytes.Buffer, src, delta []byte) error { + if len(delta) < deltaSizeMin { + return ErrInvalidDelta + } + + srcSz, delta := decodeLEB128(delta) + if srcSz != uint(len(src)) { + return ErrInvalidDelta + } + + targetSz, delta := decodeLEB128(delta) + remainingTargetSz := targetSz + + var cmd byte + dst.Grow(int(targetSz)) + for { + if len(delta) == 0 { + return ErrInvalidDelta + } + + cmd = delta[0] + delta = delta[1:] + if isCopyFromSrc(cmd) { + var offset, sz uint + var err error + offset, delta, err = decodeOffset(cmd, delta) + if err != nil { + return err + } + + sz, delta, err = decodeSize(cmd, delta) + if err != nil { + return err + } + + if invalidSize(sz, targetSz) || + invalidOffsetSize(offset, sz, srcSz) { + break + } + dst.Write(src[offset : offset+sz]) + remainingTargetSz -= sz + } else if isCopyFromDelta(cmd) { + sz := uint(cmd) // cmd is the size itself + if invalidSize(sz, targetSz) { + return ErrInvalidDelta + } + + if uint(len(delta)) < sz { + return ErrInvalidDelta + } + + dst.Write(delta[0:sz]) + remainingTargetSz -= sz + delta = delta[sz:] + } else { + return ErrDeltaCmd + } + + if remainingTargetSz <= 0 { + break + } + } + + return nil +} + +// Decodes a number encoded as an unsigned LEB128 at the start of some +// binary data and returns the decoded number and the rest of the +// stream. +// +// This must be called twice on the delta data buffer, first to get the +// expected source buffer size, and again to get the target buffer size. +func decodeLEB128(input []byte) (uint, []byte) { + var num, sz uint + var b byte + for { + b = input[sz] + num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks + sz++ + + if uint(b)&continuation == 0 || sz == uint(len(input)) { + break + } + } + + return num, input[sz:] +} + +const ( + payload = 0x7f // 0111 1111 + continuation = 0x80 // 1000 0000 +) + +func isCopyFromSrc(cmd byte) bool { + return (cmd & 0x80) != 0 +} + +func isCopyFromDelta(cmd byte) bool { + return (cmd&0x80) == 0 && cmd != 0 +} + +func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { + var offset uint + if (cmd & 0x01) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset = uint(delta[0]) + delta = delta[1:] + } + if (cmd & 0x02) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset |= uint(delta[0]) << 8 + delta = delta[1:] + } + if (cmd & 0x04) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset |= uint(delta[0]) << 16 + delta = delta[1:] + } + if (cmd & 0x08) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset |= uint(delta[0]) << 24 + delta = delta[1:] + } + + return offset, delta, nil +} + +func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { + var sz uint + if (cmd & 0x10) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + sz = uint(delta[0]) + delta = delta[1:] + } + if (cmd & 0x20) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + sz |= uint(delta[0]) << 8 + delta = delta[1:] + } + if (cmd & 0x40) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + sz |= uint(delta[0]) << 16 + delta = delta[1:] + } + if sz == 0 { + sz = 0x10000 + } + + return sz, delta, nil +} + +func invalidSize(sz, targetSz uint) bool { + return sz > targetSz +} + +func invalidOffsetSize(offset, sz, srcSz uint) bool { 
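+	// Editor's clarifying note: offset+sz uses unchecked uint arithmetic, so
+	// a crafted delta could make the sum wrap around zero; sumOverflows
+	// rejects that wrap before the bounds comparison against srcSz.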
+ return sumOverflows(offset, sz) || + offset+sz > srcSz +} + +func sumOverflows(a, b uint) bool { + return a+b < a +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go new file mode 100644 index 0000000000..6e6a687886 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go @@ -0,0 +1,466 @@ +package packfile + +import ( + "bufio" + "bytes" + "compress/zlib" + "fmt" + "hash" + "hash/crc32" + "io" + stdioutil "io/ioutil" + "sync" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/binary" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var ( + // ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile + ErrEmptyPackfile = NewError("empty packfile") + // ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect. + ErrBadSignature = NewError("malformed pack file signature") + // ErrUnsupportedVersion is returned by ReadHeader when the packfile version is + // different than VersionSupported. + ErrUnsupportedVersion = NewError("unsupported packfile version") + // ErrSeekNotSupported returned if seek is not support + ErrSeekNotSupported = NewError("not seek support") +) + +// ObjectHeader contains the information related to the object, this information +// is collected from the previous bytes to the content of the object. +type ObjectHeader struct { + Type plumbing.ObjectType + Offset int64 + Length int64 + Reference plumbing.Hash + OffsetReference int64 +} + +type Scanner struct { + r *scannerReader + crc hash.Hash32 + + // pendingObject is used to detect if an object has been read, or still + // is waiting to be read + pendingObject *ObjectHeader + version, objects uint32 + + // lsSeekable says if this scanner can do Seek or not, to have a Scanner + // seekable a r implementing io.Seeker is required + IsSeekable bool +} + +// NewScanner returns a new Scanner based on a reader, if the given reader +// implements io.ReadSeeker the Scanner will be also Seekable +func NewScanner(r io.Reader) *Scanner { + _, ok := r.(io.ReadSeeker) + + crc := crc32.NewIEEE() + return &Scanner{ + r: newScannerReader(r, crc), + crc: crc, + IsSeekable: ok, + } +} + +func (s *Scanner) Reset(r io.Reader) { + _, ok := r.(io.ReadSeeker) + + s.r.Reset(r) + s.crc.Reset() + s.IsSeekable = ok + s.pendingObject = nil + s.version = 0 + s.objects = 0 +} + +// Header reads the whole packfile header (signature, version and object count). +// It returns the version and the object count and performs checks on the +// validity of the signature and the version fields. +func (s *Scanner) Header() (version, objects uint32, err error) { + if s.version != 0 { + return s.version, s.objects, nil + } + + sig, err := s.readSignature() + if err != nil { + if err == io.EOF { + err = ErrEmptyPackfile + } + + return + } + + if !s.isValidSignature(sig) { + err = ErrBadSignature + return + } + + version, err = s.readVersion() + s.version = version + if err != nil { + return + } + + if !s.isSupportedVersion(version) { + err = ErrUnsupportedVersion.AddDetails("%d", version) + return + } + + objects, err = s.readCount() + s.objects = objects + return +} + +// readSignature reads an returns the signature field in the packfile. 
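+// Editor's clarifying note: the 12-byte pack header read below is the 4-byte
+// signature "PACK", a 4-byte big-endian version (VersionSupported, 2 at the
+// time of writing) and a 4-byte big-endian object count.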
+func (s *Scanner) readSignature() ([]byte, error) { + var sig = make([]byte, 4) + if _, err := io.ReadFull(s.r, sig); err != nil { + return []byte{}, err + } + + return sig, nil +} + +// isValidSignature returns if sig is a valid packfile signature. +func (s *Scanner) isValidSignature(sig []byte) bool { + return bytes.Equal(sig, signature) +} + +// readVersion reads and returns the version field of a packfile. +func (s *Scanner) readVersion() (uint32, error) { + return binary.ReadUint32(s.r) +} + +// isSupportedVersion returns whether version v is supported by the parser. +// The current supported version is VersionSupported, defined above. +func (s *Scanner) isSupportedVersion(v uint32) bool { + return v == VersionSupported +} + +// readCount reads and returns the count of objects field of a packfile. +func (s *Scanner) readCount() (uint32, error) { + return binary.ReadUint32(s.r) +} + +// SeekObjectHeader seeks to specified offset and returns the ObjectHeader +// for the next object in the reader +func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) { + // if seeking we assume that you are not interested in the header + if s.version == 0 { + s.version = VersionSupported + } + + if _, err := s.r.Seek(offset, io.SeekStart); err != nil { + return nil, err + } + + h, err := s.nextObjectHeader() + if err != nil { + return nil, err + } + + h.Offset = offset + return h, nil +} + +// NextObjectHeader returns the ObjectHeader for the next object in the reader +func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { + if err := s.doPending(); err != nil { + return nil, err + } + + offset, err := s.r.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + + h, err := s.nextObjectHeader() + if err != nil { + return nil, err + } + + h.Offset = offset + return h, nil +} + +// nextObjectHeader returns the ObjectHeader for the next object in the reader +// without the Offset field +func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) { + s.r.Flush() + s.crc.Reset() + + h := &ObjectHeader{} + s.pendingObject = h + + var err error + h.Offset, err = s.r.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + + h.Type, h.Length, err = s.readObjectTypeAndLength() + if err != nil { + return nil, err + } + + switch h.Type { + case plumbing.OFSDeltaObject: + no, err := binary.ReadVariableWidthInt(s.r) + if err != nil { + return nil, err + } + + h.OffsetReference = h.Offset - no + case plumbing.REFDeltaObject: + var err error + h.Reference, err = binary.ReadHash(s.r) + if err != nil { + return nil, err + } + } + + return h, nil +} + +func (s *Scanner) doPending() error { + if s.version == 0 { + var err error + s.version, s.objects, err = s.Header() + if err != nil { + return err + } + } + + return s.discardObjectIfNeeded() +} + +func (s *Scanner) discardObjectIfNeeded() error { + if s.pendingObject == nil { + return nil + } + + h := s.pendingObject + n, _, err := s.NextObject(stdioutil.Discard) + if err != nil { + return err + } + + if n != h.Length { + return fmt.Errorf( + "error discarding object, discarded %d, expected %d", + n, h.Length, + ) + } + + return nil +} + +// ReadObjectTypeAndLength reads and returns the object type and the +// length field from an object entry in a packfile. 
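+// Editor's clarifying note: in the first byte the MSB is the continuation
+// flag, the next three bits encode the object type and the low four bits
+// hold the least significant bits of the length; each subsequent byte
+// carries seven more length bits. For example, 0b1011_0100 (0xB4) starts a
+// blob (type 3) whose length has low bits 0x4 and continues in the next byte.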
+func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) { + t, c, err := s.readType() + if err != nil { + return t, 0, err + } + + l, err := s.readLength(c) + + return t, l, err +} + +func (s *Scanner) readType() (plumbing.ObjectType, byte, error) { + var c byte + var err error + if c, err = s.r.ReadByte(); err != nil { + return plumbing.ObjectType(0), 0, err + } + + typ := parseType(c) + + return typ, c, nil +} + +func parseType(b byte) plumbing.ObjectType { + return plumbing.ObjectType((b & maskType) >> firstLengthBits) +} + +// the length is codified in the last 4 bits of the first byte and in +// the last 7 bits of subsequent bytes. Last byte has a 0 MSB. +func (s *Scanner) readLength(first byte) (int64, error) { + length := int64(first & maskFirstLength) + + c := first + shift := firstLengthBits + var err error + for c&maskContinue > 0 { + if c, err = s.r.ReadByte(); err != nil { + return 0, err + } + + length += int64(c&maskLength) << shift + shift += lengthBits + } + + return length, nil +} + +// NextObject writes the content of the next object into the reader, returns +// the number of bytes written, the CRC32 of the content and an error, if any +func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) { + s.pendingObject = nil + written, err = s.copyObject(w) + + s.r.Flush() + crc32 = s.crc.Sum32() + s.crc.Reset() + + return +} + +// ReadRegularObject reads and write a non-deltified object +// from it zlib stream in an object entry in the packfile. +func (s *Scanner) copyObject(w io.Writer) (n int64, err error) { + zr := zlibReaderPool.Get().(io.ReadCloser) + defer zlibReaderPool.Put(zr) + + if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil { + return 0, fmt.Errorf("zlib reset error: %s", err) + } + + defer ioutil.CheckClose(zr, &err) + buf := byteSlicePool.Get().([]byte) + n, err = io.CopyBuffer(w, zr, buf) + byteSlicePool.Put(buf) + return +} + +var byteSlicePool = sync.Pool{ + New: func() interface{} { + return make([]byte, 32*1024) + }, +} + +// SeekFromStart sets a new offset from start, returns the old position before +// the change. +func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) { + // if seeking we assume that you are not interested in the header + if s.version == 0 { + s.version = VersionSupported + } + + previous, err = s.r.Seek(0, io.SeekCurrent) + if err != nil { + return -1, err + } + + _, err = s.r.Seek(offset, io.SeekStart) + return previous, err +} + +// Checksum returns the checksum of the packfile +func (s *Scanner) Checksum() (plumbing.Hash, error) { + err := s.discardObjectIfNeeded() + if err != nil { + return plumbing.ZeroHash, err + } + + return binary.ReadHash(s.r) +} + +// Close reads the reader until io.EOF +func (s *Scanner) Close() error { + buf := byteSlicePool.Get().([]byte) + _, err := io.CopyBuffer(stdioutil.Discard, s.r, buf) + byteSlicePool.Put(buf) + return err +} + +// Flush is a no-op (deprecated) +func (s *Scanner) Flush() error { + return nil +} + +// scannerReader has the following characteristics: +// - Provides an io.SeekReader impl for bufio.Reader, when the underlying +// reader supports it. +// - Keeps track of the current read position, for when the underlying reader +// isn't an io.SeekReader, but we still want to know the current offset. +// - Writes to the hash writer what it reads, with the aid of a smaller buffer. +// The buffer helps avoid a performance penality for performing small writes +// to the crc32 hash writer. 
+type scannerReader struct { + reader io.Reader + crc io.Writer + rbuf *bufio.Reader + wbuf *bufio.Writer + offset int64 +} + +func newScannerReader(r io.Reader, h io.Writer) *scannerReader { + sr := &scannerReader{ + rbuf: bufio.NewReader(nil), + wbuf: bufio.NewWriterSize(nil, 64), + crc: h, + } + sr.Reset(r) + + return sr +} + +func (r *scannerReader) Reset(reader io.Reader) { + r.reader = reader + r.rbuf.Reset(r.reader) + r.wbuf.Reset(r.crc) + + r.offset = 0 + if seeker, ok := r.reader.(io.ReadSeeker); ok { + r.offset, _ = seeker.Seek(0, io.SeekCurrent) + } +} + +func (r *scannerReader) Read(p []byte) (n int, err error) { + n, err = r.rbuf.Read(p) + + r.offset += int64(n) + if _, err := r.wbuf.Write(p[:n]); err != nil { + return n, err + } + return +} + +func (r *scannerReader) ReadByte() (b byte, err error) { + b, err = r.rbuf.ReadByte() + if err == nil { + r.offset++ + return b, r.wbuf.WriteByte(b) + } + return +} + +func (r *scannerReader) Flush() error { + return r.wbuf.Flush() +} + +// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker, +// then only whence=io.SeekCurrent is supported, any other operation fails. +func (r *scannerReader) Seek(offset int64, whence int) (int64, error) { + var err error + + if seeker, ok := r.reader.(io.ReadSeeker); !ok { + if whence != io.SeekCurrent || offset != 0 { + return -1, ErrSeekNotSupported + } + } else { + if whence == io.SeekCurrent && offset == 0 { + return r.offset, nil + } + + r.offset, err = seeker.Seek(offset, whence) + r.rbuf.Reset(r.reader) + } + + return r.offset, err +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go new file mode 100644 index 0000000000..6d409795b0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go @@ -0,0 +1,122 @@ +// Package pktline implements reading payloads form pkt-lines and encoding +// pkt-lines from payloads. +package pktline + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// An Encoder writes pkt-lines to an output stream. +type Encoder struct { + w io.Writer +} + +const ( + // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. + MaxPayloadSize = 65516 + + // For compatibility with canonical Git implementation, accept longer pkt-lines + OversizePayloadMax = 65520 +) + +var ( + // FlushPkt are the contents of a flush-pkt pkt-line. + FlushPkt = []byte{'0', '0', '0', '0'} + // Flush is the payload to use with the Encode method to encode a flush-pkt. + Flush = []byte{} + // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. + FlushString = "" + // ErrPayloadTooLong is returned by the Encode methods when any of the + // provided payloads is bigger than MaxPayloadSize. + ErrPayloadTooLong = errors.New("payload is too long") +) + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + } +} + +// Flush encodes a flush-pkt to the output stream. +func (e *Encoder) Flush() error { + _, err := e.w.Write(FlushPkt) + return err +} + +// Encode encodes a pkt-line with the payload specified and write it to +// the output stream. If several payloads are specified, each of them +// will get streamed in their own pkt-lines. 
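+//
+// Worked example (editor's addition, not part of the vendored source): the
+// payload "hello\n" is 6 bytes, so its pkt-line is the ASCII hex length
+// 000a (6+4) followed by the payload.
+//
+//	e := pktline.NewEncoder(w)
+//	_ = e.EncodeString("hello\n") // writes "000ahello\n"
+//	_ = e.Flush()                 // writes "0000"
+//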
+func (e *Encoder) Encode(payloads ...[]byte) error { + for _, p := range payloads { + if err := e.encodeLine(p); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeLine(p []byte) error { + if len(p) > MaxPayloadSize { + return ErrPayloadTooLong + } + + if bytes.Equal(p, Flush) { + return e.Flush() + } + + n := len(p) + 4 + if _, err := e.w.Write(asciiHex16(n)); err != nil { + return err + } + _, err := e.w.Write(p) + return err +} + +// Returns the hexadecimal ascii representation of the 16 less +// significant bits of n. The length of the returned slice will always +// be 4. Example: if n is 1234 (0x4d2), the return value will be +// []byte{'0', '4', 'd', '2'}. +func asciiHex16(n int) []byte { + var ret [4]byte + ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) + ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) + ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) + ret[3] = byteToASCIIHex(byte(n & 0x000f)) + + return ret[:] +} + +// turns a byte into its hexadecimal ascii representation. Example: +// from 11 (0xb) to 'b'. +func byteToASCIIHex(n byte) byte { + if n < 10 { + return '0' + n + } + + return 'a' - 10 + n +} + +// EncodeString works similarly as Encode but payloads are specified as strings. +func (e *Encoder) EncodeString(payloads ...string) error { + for _, p := range payloads { + if err := e.Encode([]byte(p)); err != nil { + return err + } + } + + return nil +} + +// Encodef encodes a single pkt-line with the payload formatted as +// the format specifier. The rest of the arguments will be used in +// the format string. +func (e *Encoder) Encodef(format string, a ...interface{}) error { + return e.EncodeString( + fmt.Sprintf(format, a...), + ) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go new file mode 100644 index 0000000000..99aab46e88 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go @@ -0,0 +1,134 @@ +package pktline + +import ( + "errors" + "io" +) + +const ( + lenSize = 4 +) + +// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. +var ErrInvalidPktLen = errors.New("invalid pkt-len found") + +// Scanner provides a convenient interface for reading the payloads of a +// series of pkt-lines. It takes an io.Reader providing the source, +// which then can be tokenized through repeated calls to the Scan +// method. +// +// After each Scan call, the Bytes method will return the payload of the +// corresponding pkt-line on a shared buffer, which will be 65516 bytes +// or smaller. Flush pkt-lines are represented by empty byte slices. +// +// Scanning stops at EOF or the first I/O error. +type Scanner struct { + r io.Reader // The reader provided by the client + err error // Sticky error + payload []byte // Last pkt-payload + len [lenSize]byte // Last pkt-len +} + +// NewScanner returns a new Scanner to read from r. +func NewScanner(r io.Reader) *Scanner { + return &Scanner{ + r: r, + } +} + +// Err returns the first error encountered by the Scanner. +func (s *Scanner) Err() error { + return s.err +} + +// Scan advances the Scanner to the next pkt-line, whose payload will +// then be available through the Bytes method. Scanning stops at EOF +// or the first I/O error. After Scan returns false, the Err method +// will return any error that occurred during scanning, except that if +// it was io.EOF, Err will return nil. 
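+//
+// Illustrative sketch (editor's addition, not part of the vendored source):
+//
+//	s := pktline.NewScanner(r)
+//	for s.Scan() {
+//		payload := s.Bytes() // empty slice for a flush-pkt
+//		fmt.Printf("%q\n", payload)
+//	}
+//	if err := s.Err(); err != nil {
+//		return err
+//	}
+//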
+func (s *Scanner) Scan() bool { + var l int + l, s.err = s.readPayloadLen() + if s.err == io.EOF { + s.err = nil + return false + } + if s.err != nil { + return false + } + + if cap(s.payload) < l { + s.payload = make([]byte, 0, l) + } + + if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { + return false + } + s.payload = s.payload[:l] + + return true +} + +// Bytes returns the most recent payload generated by a call to Scan. +// The underlying array may point to data that will be overwritten by a +// subsequent call to Scan. It does no allocation. +func (s *Scanner) Bytes() []byte { + return s.payload +} + +// Method readPayloadLen returns the payload length by reading the +// pkt-len and subtracting the pkt-len size. +func (s *Scanner) readPayloadLen() (int, error) { + if _, err := io.ReadFull(s.r, s.len[:]); err != nil { + if err == io.ErrUnexpectedEOF { + return 0, ErrInvalidPktLen + } + + return 0, err + } + + n, err := hexDecode(s.len) + if err != nil { + return 0, err + } + + switch { + case n == 0: + return 0, nil + case n <= lenSize: + return 0, ErrInvalidPktLen + case n > OversizePayloadMax+lenSize: + return 0, ErrInvalidPktLen + default: + return n - lenSize, nil + } +} + +// Turns the hexadecimal representation of a number in a byte slice into +// a number. This function substitute strconv.ParseUint(string(buf), 16, +// 16) and/or hex.Decode, to avoid generating new strings, thus helping the +// GC. +func hexDecode(buf [lenSize]byte) (int, error) { + var ret int + for i := 0; i < lenSize; i++ { + n, err := asciiHexToByte(buf[i]) + if err != nil { + return 0, ErrInvalidPktLen + } + ret = 16*ret + int(n) + } + return ret, nil +} + +// turns the hexadecimal ascii representation of a byte into its +// numerical value. Example: from 'b' to 11 (0xb). +func asciiHexToByte(b byte) (byte, error) { + switch { + case b >= '0' && b <= '9': + return b - '0', nil + case b >= 'a' && b <= 'f': + return b - 'a' + 10, nil + default: + return 0, ErrInvalidPktLen + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go new file mode 100644 index 0000000000..afc602a9ec --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go @@ -0,0 +1,83 @@ +package plumbing + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "hash" + "sort" + "strconv" +) + +// Hash SHA1 hashed content +type Hash [20]byte + +// ZeroHash is Hash with value zero +var ZeroHash Hash + +// ComputeHash compute the hash for a given ObjectType and content +func ComputeHash(t ObjectType, content []byte) Hash { + h := NewHasher(t, int64(len(content))) + h.Write(content) + return h.Sum() +} + +// NewHash return a new Hash from a hexadecimal hash representation +func NewHash(s string) Hash { + b, _ := hex.DecodeString(s) + + var h Hash + copy(h[:], b) + + return h +} + +func (h Hash) IsZero() bool { + var empty Hash + return h == empty +} + +func (h Hash) String() string { + return hex.EncodeToString(h[:]) +} + +type Hasher struct { + hash.Hash +} + +func NewHasher(t ObjectType, size int64) Hasher { + h := Hasher{sha1.New()} + h.Write(t.Bytes()) + h.Write([]byte(" ")) + h.Write([]byte(strconv.FormatInt(size, 10))) + h.Write([]byte{0}) + return h +} + +func (h Hasher) Sum() (hash Hash) { + copy(hash[:], h.Hash.Sum(nil)) + return +} + +// HashesSort sorts a slice of Hashes in increasing order. 
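+//
+// Illustrative sketch (editor's addition, not part of the vendored source);
+// the hash values are hypothetical:
+//
+//	hashes := []plumbing.Hash{
+//		plumbing.NewHash("ffffffffffffffffffffffffffffffffffffffff"),
+//		plumbing.NewHash("0000000000000000000000000000000000000001"),
+//	}
+//	plumbing.HashesSort(hashes) // ascending byte order: 00...01 comes first
+//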
+func HashesSort(a []Hash) { + sort.Sort(HashSlice(a)) +} + +// HashSlice attaches the methods of sort.Interface to []Hash, sorting in +// increasing order. +type HashSlice []Hash + +func (p HashSlice) Len() int { return len(p) } +func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 } +func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// IsHash returns true if the given string is a valid hash. +func IsHash(s string) bool { + if len(s) != 40 { + return false + } + + _, err := hex.DecodeString(s) + return err == nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/memory.go b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go new file mode 100644 index 0000000000..21337cc0dd --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go @@ -0,0 +1,72 @@ +package plumbing + +import ( + "bytes" + "io" +) + +// MemoryObject on memory Object implementation +type MemoryObject struct { + t ObjectType + h Hash + cont []byte + sz int64 +} + +// Hash returns the object Hash, the hash is calculated on-the-fly the first +// time it's called, in all subsequent calls the same Hash is returned even +// if the type or the content have changed. The Hash is only generated if the +// size of the content is exactly the object size. +func (o *MemoryObject) Hash() Hash { + if o.h == ZeroHash && int64(len(o.cont)) == o.sz { + o.h = ComputeHash(o.t, o.cont) + } + + return o.h +} + +// Type return the ObjectType +func (o *MemoryObject) Type() ObjectType { return o.t } + +// SetType sets the ObjectType +func (o *MemoryObject) SetType(t ObjectType) { o.t = t } + +// Size return the size of the object +func (o *MemoryObject) Size() int64 { return o.sz } + +// SetSize set the object size, a content of the given size should be written +// afterwards +func (o *MemoryObject) SetSize(s int64) { o.sz = s } + +// Reader returns an io.ReadCloser used to read the object's content. +// +// For a MemoryObject, this reader is seekable. +func (o *MemoryObject) Reader() (io.ReadCloser, error) { + return nopCloser{bytes.NewReader(o.cont)}, nil +} + +// Writer returns a ObjectWriter used to write the object's content. +func (o *MemoryObject) Writer() (io.WriteCloser, error) { + return o, nil +} + +func (o *MemoryObject) Write(p []byte) (n int, err error) { + o.cont = append(o.cont, p...) + o.sz = int64(len(o.cont)) + + return len(p), nil +} + +// Close releases any resources consumed by the object when it is acting as a +// ObjectWriter. +func (o *MemoryObject) Close() error { return nil } + +// nopCloser exposes the extra methods of bytes.Reader while nopping Close(). +// +// This allows clients to attempt seeking in a cached Blob's Reader. +type nopCloser struct { + *bytes.Reader +} + +// Close does nothing. +func (nc nopCloser) Close() error { return nil } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object.go new file mode 100644 index 0000000000..2655dee43e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object.go @@ -0,0 +1,111 @@ +// package plumbing implement the core interfaces and structs used by go-git +package plumbing + +import ( + "errors" + "io" +) + +var ( + ErrObjectNotFound = errors.New("object not found") + // ErrInvalidType is returned when an invalid object type is provided. 
+	ErrInvalidType = errors.New("invalid object type")
+)
+
+// EncodedObject is a generic representation of any git object.
+type EncodedObject interface {
+	Hash() Hash
+	Type() ObjectType
+	SetType(ObjectType)
+	Size() int64
+	SetSize(int64)
+	Reader() (io.ReadCloser, error)
+	Writer() (io.WriteCloser, error)
+}
+
+// DeltaObject is an EncodedObject representing a delta.
+type DeltaObject interface {
+	EncodedObject
+	// BaseHash returns the hash of the object used as base for this delta.
+	BaseHash() Hash
+	// ActualHash returns the hash of the object after applying the delta.
+	ActualHash() Hash
+	// ActualSize returns the size of the object after applying the delta.
+	ActualSize() int64
+}
+
+// ObjectType is the internal object type.
+// Integer values from 0 to 7 map to those exposed by git.
+// AnyObject is used to represent any from 0 to 7.
+type ObjectType int8
+
+const (
+	InvalidObject ObjectType = 0
+	CommitObject  ObjectType = 1
+	TreeObject    ObjectType = 2
+	BlobObject    ObjectType = 3
+	TagObject     ObjectType = 4
+	// 5 reserved for future expansion
+	OFSDeltaObject ObjectType = 6
+	REFDeltaObject ObjectType = 7
+
+	AnyObject ObjectType = -127
+)
+
+func (t ObjectType) String() string {
+	switch t {
+	case CommitObject:
+		return "commit"
+	case TreeObject:
+		return "tree"
+	case BlobObject:
+		return "blob"
+	case TagObject:
+		return "tag"
+	case OFSDeltaObject:
+		return "ofs-delta"
+	case REFDeltaObject:
+		return "ref-delta"
+	case AnyObject:
+		return "any"
+	default:
+		return "unknown"
+	}
+}
+
+func (t ObjectType) Bytes() []byte {
+	return []byte(t.String())
+}
+
+// Valid returns true if t is a valid ObjectType.
+func (t ObjectType) Valid() bool {
+	return t >= CommitObject && t <= REFDeltaObject
+}
+
+// IsDelta returns true for any ObjectType that represents a delta (i.e.
+// REFDeltaObject or OFSDeltaObject).
+func (t ObjectType) IsDelta() bool {
+	return t == REFDeltaObject || t == OFSDeltaObject
+}
+
+// ParseObjectType parses a string representation of ObjectType. It returns an
+// error on parse failure.
+func ParseObjectType(value string) (typ ObjectType, err error) {
+	switch value {
+	case "commit":
+		typ = CommitObject
+	case "tree":
+		typ = TreeObject
+	case "blob":
+		typ = BlobObject
+	case "tag":
+		typ = TagObject
+	case "ofs-delta":
+		typ = OFSDeltaObject
+	case "ref-delta":
+		typ = REFDeltaObject
+	default:
+		err = ErrInvalidType
+	}
+	return
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go
new file mode 100644
index 0000000000..8fb7576fa3
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go
@@ -0,0 +1,144 @@
+package object
+
+import (
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// Blob is used to store arbitrary data - it is generally a file.
+type Blob struct {
+	// Hash of the blob.
+	Hash plumbing.Hash
+	// Size of the (uncompressed) blob.
+	Size int64
+
+	obj plumbing.EncodedObject
+}
+
+// GetBlob gets a blob from an object storer and decodes it.
+func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) {
+	o, err := s.EncodedObject(plumbing.BlobObject, h)
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeBlob(o)
+}
+
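The pieces in these vendored files fit together end to end: `ComputeHash` hashes the git header `"<type> <size>\x00"` followed by the content, a `MemoryObject` reaches the same hash lazily once its content is fully written, and `ParseObjectType` round-trips the on-disk type names. A minimal sketch using only APIs from the files in this diff:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	content := []byte("hello, git\n")

	// ComputeHash hashes "blob <size>\x00<content>", so it matches what
	// `git hash-object` prints for the same bytes.
	h := plumbing.ComputeHash(plumbing.BlobObject, content)

	// A MemoryObject yields the same hash once the content is written:
	// Write keeps the size in sync, and Hash is computed on first use.
	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	obj.Write(content)
	fmt.Println(h == obj.Hash()) // true

	// ParseObjectType round-trips the on-disk type names.
	t, err := plumbing.ParseObjectType("blob")
	fmt.Println(t, t.Valid(), t.IsDelta(), err) // blob true false <nil>
}
```

+// DecodeBlob decodes an encoded object into a *Blob.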
+func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) { + b := &Blob{} + if err := b.Decode(o); err != nil { + return nil, err + } + + return b, nil +} + +// ID returns the object ID of the blob. The returned value will always match +// the current value of Blob.Hash. +// +// ID is present to fulfill the Object interface. +func (b *Blob) ID() plumbing.Hash { + return b.Hash +} + +// Type returns the type of object. It always returns plumbing.BlobObject. +// +// Type is present to fulfill the Object interface. +func (b *Blob) Type() plumbing.ObjectType { + return plumbing.BlobObject +} + +// Decode transforms a plumbing.EncodedObject into a Blob struct. +func (b *Blob) Decode(o plumbing.EncodedObject) error { + if o.Type() != plumbing.BlobObject { + return ErrUnsupportedObject + } + + b.Hash = o.Hash() + b.Size = o.Size() + b.obj = o + + return nil +} + +// Encode transforms a Blob into a plumbing.EncodedObject. +func (b *Blob) Encode(o plumbing.EncodedObject) (err error) { + o.SetType(plumbing.BlobObject) + + w, err := o.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + r, err := b.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + _, err = io.Copy(w, r) + return err +} + +// Reader returns a reader allow the access to the content of the blob +func (b *Blob) Reader() (io.ReadCloser, error) { + return b.obj.Reader() +} + +// BlobIter provides an iterator for a set of blobs. +type BlobIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewBlobIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *BlobIter that iterates over all +// blobs contained in the storer.EncodedObjectIter. +// +// Any non-blob object returned by the storer.EncodedObjectIter is skipped. +func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter { + return &BlobIter{iter, s} +} + +// Next moves the iterator to the next blob and returns a pointer to it. If +// there are no more blobs, it returns io.EOF. +func (iter *BlobIter) Next() (*Blob, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + if obj.Type() != plumbing.BlobObject { + continue + } + + return DecodeBlob(obj) + } +} + +// ForEach call the cb function for each blob contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *BlobIter) ForEach(cb func(*Blob) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + if obj.Type() != plumbing.BlobObject { + return nil + } + + b, err := DecodeBlob(obj) + if err != nil { + return err + } + + return cb(b) + }) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go new file mode 100644 index 0000000000..8b119bc9c9 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go @@ -0,0 +1,159 @@ +package object + +import ( + "bytes" + "context" + "fmt" + "strings" + + "github.com/go-git/go-git/v5/utils/merkletrie" +) + +// Change values represent a detected change between two git trees. For +// modifications, From is the original status of the node and To is its +// final status. For insertions, From is the zero value and for +// deletions To is the zero value. 
+type Change struct {
+	From ChangeEntry
+	To   ChangeEntry
+}
+
+var empty ChangeEntry
+
+// Action returns the kind of action represented by the change, an
+// insertion, a deletion or a modification.
+func (c *Change) Action() (merkletrie.Action, error) {
+	if c.From == empty && c.To == empty {
+		return merkletrie.Action(0),
+			fmt.Errorf("malformed change: empty from and to")
+	}
+
+	if c.From == empty {
+		return merkletrie.Insert, nil
+	}
+
+	if c.To == empty {
+		return merkletrie.Delete, nil
+	}
+
+	return merkletrie.Modify, nil
+}
+
+// Files returns the files before and after a change.
+// For insertions from will be nil. For deletions to will be nil.
+func (c *Change) Files() (from, to *File, err error) {
+	action, err := c.Action()
+	if err != nil {
+		return
+	}
+
+	if action == merkletrie.Insert || action == merkletrie.Modify {
+		to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry)
+		if !c.To.TreeEntry.Mode.IsFile() {
+			return nil, nil, nil
+		}
+
+		if err != nil {
+			return
+		}
+	}
+
+	if action == merkletrie.Delete || action == merkletrie.Modify {
+		from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry)
+		if !c.From.TreeEntry.Mode.IsFile() {
+			return nil, nil, nil
+		}
+
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (c *Change) String() string {
+	action, err := c.Action()
+	if err != nil {
+		return "malformed change"
+	}
+
+	return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name())
+}
+
+// Patch returns a Patch with all the file changes in chunks. This
+// representation can be used to create several diff outputs.
+func (c *Change) Patch() (*Patch, error) {
+	return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the file changes in chunks. This
+// representation can be used to create several diff outputs.
+// If the context expires, a non-nil error will be returned.
+// The provided context must be non-nil.
+func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
+	return getPatchContext(ctx, "", c)
+}
+
+func (c *Change) name() string {
+	if c.From != empty {
+		return c.From.Name
+	}
+
+	return c.To.Name
+}
+
+// ChangeEntry values represent a node that has suffered a change.
+type ChangeEntry struct {
+	// Full path of the node using "/" as separator.
+	Name string
+	// Parent tree of the node that has changed.
+	Tree *Tree
+	// The entry of the node.
+	TreeEntry TreeEntry
+}
+
+// Changes represents a collection of changes between two git trees.
+// Implements sort.Interface lexicographically over the path of the
+// changed files.
+type Changes []*Change
+
+func (c Changes) Len() int {
+	return len(c)
+}
+
+func (c Changes) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+func (c Changes) Less(i, j int) bool {
+	return strings.Compare(c[i].name(), c[j].name()) < 0
+}
+
+func (c Changes) String() string {
+	var buffer bytes.Buffer
+	buffer.WriteString("[")
+	comma := ""
+	for _, v := range c {
+		buffer.WriteString(comma)
+		buffer.WriteString(v.String())
+		comma = ", "
+	}
+	buffer.WriteString("]")
+
+	return buffer.String()
+}
+
+// Patch returns a Patch with all the changes in chunks. This
+// representation can be used to create several diff outputs.
+func (c Changes) Patch() (*Patch, error) {
+	return c.PatchContext(context.Background())
+}
+
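`Change` values are normally produced by `DiffTree` (added further down in this diff), but hand-built entries are enough to see how `Action` discriminates the three cases: only `To` set means insertion, only `From` means deletion, both mean modification. A small sketch with hypothetical paths:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	// For an insertion only To is set; for a deletion only From;
	// a modification carries both.
	ins := &object.Change{To: object.ChangeEntry{Name: "docs/new.md"}}
	del := &object.Change{From: object.ChangeEntry{Name: "docs/old.md"}}
	mod := &object.Change{
		From: object.ChangeEntry{Name: "main.go"},
		To:   object.ChangeEntry{Name: "main.go"},
	}

	for _, c := range []*object.Change{ins, del, mod} {
		action, err := c.Action()
		if err != nil {
			panic(err)
		}
		fmt.Println(action, c) // e.g. insert <Action: insert, Path: docs/new.md>
	}
}
```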
+// PatchContext returns a Patch with all the changes in chunks. This
+// representation can be used to create several diff outputs.
+// If the context expires, a non-nil error will be returned.
+// The provided context must be non-nil.
+func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
+	return getPatchContext(ctx, "", c...)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go
new file mode 100644
index 0000000000..f701188288
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go
@@ -0,0 +1,61 @@
+package object
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/go-git/go-git/v5/utils/merkletrie"
+	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
+)
+
+// The following functions transform change types from the merkletrie
+// package to change types from this package.
+
+func newChange(c merkletrie.Change) (*Change, error) {
+	ret := &Change{}
+
+	var err error
+	if ret.From, err = newChangeEntry(c.From); err != nil {
+		return nil, fmt.Errorf("From field: %s", err)
+	}
+
+	if ret.To, err = newChangeEntry(c.To); err != nil {
+		return nil, fmt.Errorf("To field: %s", err)
+	}
+
+	return ret, nil
+}
+
+func newChangeEntry(p noder.Path) (ChangeEntry, error) {
+	if p == nil {
+		return empty, nil
+	}
+
+	asTreeNoder, ok := p.Last().(*treeNoder)
+	if !ok {
+		return ChangeEntry{}, errors.New("cannot transform non-TreeNoders")
+	}
+
+	return ChangeEntry{
+		Name: p.String(),
+		Tree: asTreeNoder.parent,
+		TreeEntry: TreeEntry{
+			Name: asTreeNoder.name,
+			Mode: asTreeNoder.mode,
+			Hash: asTreeNoder.hash,
+		},
+	}, nil
+}
+
+func newChanges(src merkletrie.Changes) (Changes, error) {
+	ret := make(Changes, len(src))
+	var err error
+	for i, e := range src {
+		ret[i], err = newChange(e)
+		if err != nil {
+			return nil, fmt.Errorf("change #%d: %s", i, err)
+		}
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go
new file mode 100644
index 0000000000..98664a1ebf
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go
@@ -0,0 +1,442 @@
+package object
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+
+	"golang.org/x/crypto/openpgp"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+const (
+	beginpgp  string = "-----BEGIN PGP SIGNATURE-----"
+	endpgp    string = "-----END PGP SIGNATURE-----"
+	headerpgp string = "gpgsig"
+)
+
+// Hash represents the hash of an object
+type Hash plumbing.Hash
+
+// Commit points to a single tree, marking it as what the project looked like
+// at a certain point in time. It contains meta-information about that point
+// in time, such as a timestamp, the author of the changes since the last
+// commit, a pointer to the previous commit(s), etc.
+// http://shafiulazam.com/gitbook/1_the_git_object_model.html
+type Commit struct {
+	// Hash of the commit object.
+	Hash plumbing.Hash
+	// Author is the original author of the commit.
+	Author Signature
+	// Committer is the one performing the commit, might be different from
+	// Author.
+	Committer Signature
+	// PGPSignature is the PGP signature of the commit.
+	PGPSignature string
+	// Message is the commit message, contains arbitrary text.
+	Message string
+	// TreeHash is the hash of the root tree of the commit.
+	TreeHash plumbing.Hash
+	// ParentHashes are the hashes of the parent commits of the commit.
+ ParentHashes []plumbing.Hash + + s storer.EncodedObjectStorer +} + +// GetCommit gets a commit from an object storer and decodes it. +func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { + o, err := s.EncodedObject(plumbing.CommitObject, h) + if err != nil { + return nil, err + } + + return DecodeCommit(s, o) +} + +// DecodeCommit decodes an encoded object into a *Commit and associates it to +// the given object storer. +func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) { + c := &Commit{s: s} + if err := c.Decode(o); err != nil { + return nil, err + } + + return c, nil +} + +// Tree returns the Tree from the commit. +func (c *Commit) Tree() (*Tree, error) { + return GetTree(c.s, c.TreeHash) +} + +// PatchContext returns the Patch between the actual commit and the provided one. +// Error will be return if context expires. Provided context must be non-nil. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) { + fromTree, err := c.Tree() + if err != nil { + return nil, err + } + + var toTree *Tree + if to != nil { + toTree, err = to.Tree() + if err != nil { + return nil, err + } + } + + return fromTree.PatchContext(ctx, toTree) +} + +// Patch returns the Patch between the actual commit and the provided one. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (c *Commit) Patch(to *Commit) (*Patch, error) { + return c.PatchContext(context.Background(), to) +} + +// Parents return a CommitIter to the parent Commits. +func (c *Commit) Parents() CommitIter { + return NewCommitIter(c.s, + storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes), + ) +} + +// NumParents returns the number of parents in a commit. +func (c *Commit) NumParents() int { + return len(c.ParentHashes) +} + +var ErrParentNotFound = errors.New("commit parent not found") + +// Parent returns the ith parent of a commit. +func (c *Commit) Parent(i int) (*Commit, error) { + if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 { + return nil, ErrParentNotFound + } + + return GetCommit(c.s, c.ParentHashes[i]) +} + +// File returns the file with the specified "path" in the commit and a +// nil error if the file exists. If the file does not exist, it returns +// a nil file and the ErrFileNotFound error. +func (c *Commit) File(path string) (*File, error) { + tree, err := c.Tree() + if err != nil { + return nil, err + } + + return tree.File(path) +} + +// Files returns a FileIter allowing to iterate over the Tree +func (c *Commit) Files() (*FileIter, error) { + tree, err := c.Tree() + if err != nil { + return nil, err + } + + return tree.Files(), nil +} + +// ID returns the object ID of the commit. The returned value will always match +// the current value of Commit.Hash. +// +// ID is present to fulfill the Object interface. +func (c *Commit) ID() plumbing.Hash { + return c.Hash +} + +// Type returns the type of object. It always returns plumbing.CommitObject. +// +// Type is present to fulfill the Object interface. +func (c *Commit) Type() plumbing.ObjectType { + return plumbing.CommitObject +} + +// Decode transforms a plumbing.EncodedObject into a Commit struct. 
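The accessors above (`Parents`, `NumParents`, `Tree`, `File`) are usually reached from the top-level `go-git` package, which is outside these vendored plumbing files. A hedged sketch, assuming a repository on disk opened with `git.PlainOpen` and `Repository.CommitObject` from that top-level package:

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	// Open an existing repository; "." is a placeholder path.
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	ref, err := repo.Head()
	if err != nil {
		panic(err)
	}

	commit, err := repo.CommitObject(ref.Hash())
	if err != nil {
		panic(err)
	}

	fmt.Printf("HEAD %s has %d parent(s)\n", commit.Hash, commit.NumParents())
	_ = commit.Parents().ForEach(func(p *object.Commit) error {
		fmt.Println("  parent:", p.Hash)
		return nil
	})
}
```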
+func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { + if o.Type() != plumbing.CommitObject { + return ErrUnsupportedObject + } + + c.Hash = o.Hash() + + reader, err := o.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(reader, &err) + + r := bufPool.Get().(*bufio.Reader) + defer bufPool.Put(r) + r.Reset(reader) + + var message bool + var pgpsig bool + var msgbuf bytes.Buffer + for { + line, err := r.ReadBytes('\n') + if err != nil && err != io.EOF { + return err + } + + if pgpsig { + if len(line) > 0 && line[0] == ' ' { + line = bytes.TrimLeft(line, " ") + c.PGPSignature += string(line) + continue + } else { + pgpsig = false + } + } + + if !message { + line = bytes.TrimSpace(line) + if len(line) == 0 { + message = true + continue + } + + split := bytes.SplitN(line, []byte{' '}, 2) + + var data []byte + if len(split) == 2 { + data = split[1] + } + + switch string(split[0]) { + case "tree": + c.TreeHash = plumbing.NewHash(string(data)) + case "parent": + c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data))) + case "author": + c.Author.Decode(data) + case "committer": + c.Committer.Decode(data) + case headerpgp: + c.PGPSignature += string(data) + "\n" + pgpsig = true + } + } else { + msgbuf.Write(line) + } + + if err == io.EOF { + break + } + } + c.Message = msgbuf.String() + return nil +} + +// Encode transforms a Commit into a plumbing.EncodedObject. +func (c *Commit) Encode(o plumbing.EncodedObject) error { + return c.encode(o, true) +} + +// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature). +func (c *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error { + return c.encode(o, false) +} + +func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { + o.SetType(plumbing.CommitObject) + w, err := o.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + if _, err = fmt.Fprintf(w, "tree %s\n", c.TreeHash.String()); err != nil { + return err + } + + for _, parent := range c.ParentHashes { + if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil { + return err + } + } + + if _, err = fmt.Fprint(w, "author "); err != nil { + return err + } + + if err = c.Author.Encode(w); err != nil { + return err + } + + if _, err = fmt.Fprint(w, "\ncommitter "); err != nil { + return err + } + + if err = c.Committer.Encode(w); err != nil { + return err + } + + if c.PGPSignature != "" && includeSig { + if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { + return err + } + + // Split all the signature lines and re-write with a left padding and + // newline. Use join for this so it's clear that a newline should not be + // added after this section, as it will be added when the message is + // printed. + signature := strings.TrimSuffix(c.PGPSignature, "\n") + lines := strings.Split(signature, "\n") + if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { + return err + } + } + + if _, err = fmt.Fprintf(w, "\n\n%s", c.Message); err != nil { + return err + } + + return err +} + +// Stats returns the stats of a commit. +func (c *Commit) Stats() (FileStats, error) { + return c.StatsContext(context.Background()) +} + +// StatsContext returns the stats of a commit. Error will be return if context +// expires. Provided context must be non-nil. 
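`Decode` above parses the commit wire format: header lines (`tree`, `parent`, `author`, `committer`, `gpgsig`) up to the first blank line, then the free-form message. `Encode` writes the same layout back out, which a round trip through a `MemoryObject` makes visible. A minimal sketch; the tree hash is the well-known empty-tree hash, used here only as a placeholder:

```go
package main

import (
	"fmt"
	"io"
	"time"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	when := time.Date(2023, 1, 2, 3, 4, 5, 0, time.UTC)
	c := &object.Commit{
		Author:    object.Signature{Name: "Alice", Email: "alice@example.com", When: when},
		Committer: object.Signature{Name: "Alice", Email: "alice@example.com", When: when},
		Message:   "initial commit\n",
		TreeHash:  plumbing.NewHash("4b825dc642cb6eb9a060e54bf8d69288fbee4904"), // empty tree
	}

	obj := &plumbing.MemoryObject{}
	if err := c.Encode(obj); err != nil {
		panic(err)
	}

	r, _ := obj.Reader()
	raw, _ := io.ReadAll(r)
	fmt.Printf("%s", raw)
	// tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904
	// author Alice <alice@example.com> <unix ts> +0000
	// committer Alice <alice@example.com> <unix ts> +0000
	//
	// initial commit
}
```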
+func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) { + fromTree, err := c.Tree() + if err != nil { + return nil, err + } + + toTree := &Tree{} + if c.NumParents() != 0 { + firstParent, err := c.Parents().Next() + if err != nil { + return nil, err + } + + toTree, err = firstParent.Tree() + if err != nil { + return nil, err + } + } + + patch, err := toTree.PatchContext(ctx, fromTree) + if err != nil { + return nil, err + } + + return getFileStatsFromFilePatches(patch.FilePatches()), nil +} + +func (c *Commit) String() string { + return fmt.Sprintf( + "%s %s\nAuthor: %s\nDate: %s\n\n%s\n", + plumbing.CommitObject, c.Hash, c.Author.String(), + c.Author.When.Format(DateFormat), indent(c.Message), + ) +} + +// Verify performs PGP verification of the commit with a provided armored +// keyring and returns openpgp.Entity associated with verifying key on success. +func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { + keyRingReader := strings.NewReader(armoredKeyRing) + keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) + if err != nil { + return nil, err + } + + // Extract signature. + signature := strings.NewReader(c.PGPSignature) + + encoded := &plumbing.MemoryObject{} + // Encode commit components, excluding signature and get a reader object. + if err := c.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + er, err := encoded.Reader() + if err != nil { + return nil, err + } + + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) +} + +func indent(t string) string { + var output []string + for _, line := range strings.Split(t, "\n") { + if len(line) != 0 { + line = " " + line + } + + output = append(output, line) + } + + return strings.Join(output, "\n") +} + +// CommitIter is a generic closable interface for iterating over commits. +type CommitIter interface { + Next() (*Commit, error) + ForEach(func(*Commit) error) error + Close() +} + +// storerCommitIter provides an iterator from commits in an EncodedObjectStorer. +type storerCommitIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewCommitIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a CommitIter that iterates over all +// commits contained in the storer.EncodedObjectIter. +// +// Any non-commit object returned by the storer.EncodedObjectIter is skipped. +func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter { + return &storerCommitIter{iter, s} +} + +// Next moves the iterator to the next commit and returns a pointer to it. If +// there are no more commits, it returns io.EOF. +func (iter *storerCommitIter) Next() (*Commit, error) { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + return DecodeCommit(iter.s, obj) +} + +// ForEach call the cb function for each commit contained on this iter until +// an error appends or the end of the iter is reached. If ErrStop is sent +// the iteration is stopped but no error is returned. The iterator is closed. 
+func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + c, err := DecodeCommit(iter.s, obj) + if err != nil { + return err + } + + return cb(c) + }) +} + +func (iter *storerCommitIter) Close() { + iter.EncodedObjectIter.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go new file mode 100644 index 0000000000..a96b6a4cf0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go @@ -0,0 +1,327 @@ +package object + +import ( + "container/list" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/storage" +) + +type commitPreIterator struct { + seenExternal map[plumbing.Hash]bool + seen map[plumbing.Hash]bool + stack []CommitIter + start *Commit +} + +// NewCommitPreorderIter returns a CommitIter that walks the commit history, +// starting at the given commit and visiting its parents in pre-order. +// The given callback will be called for each visited commit. Each commit will +// be visited only once. If the callback returns an error, walking will stop +// and will return the error. Other errors might be returned if the history +// cannot be traversed (e.g. missing objects). Ignore allows to skip some +// commits from being iterated. +func NewCommitPreorderIter( + c *Commit, + seenExternal map[plumbing.Hash]bool, + ignore []plumbing.Hash, +) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + return &commitPreIterator{ + seenExternal: seenExternal, + seen: seen, + stack: make([]CommitIter, 0), + start: c, + } +} + +func (w *commitPreIterator) Next() (*Commit, error) { + var c *Commit + for { + if w.start != nil { + c = w.start + w.start = nil + } else { + current := len(w.stack) - 1 + if current < 0 { + return nil, io.EOF + } + + var err error + c, err = w.stack[current].Next() + if err == io.EOF { + w.stack = w.stack[:current] + continue + } + + if err != nil { + return nil, err + } + } + + if w.seen[c.Hash] || w.seenExternal[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + if c.NumParents() > 0 { + w.stack = append(w.stack, filteredParentIter(c, w.seen)) + } + + return c, nil + } +} + +func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter { + var hashes []plumbing.Hash + for _, h := range c.ParentHashes { + if !seen[h] { + hashes = append(hashes, h) + } + } + + return NewCommitIter(c.s, + storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes), + ) +} + +func (w *commitPreIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *commitPreIterator) Close() {} + +type commitPostIterator struct { + stack []*Commit + seen map[plumbing.Hash]bool +} + +// NewCommitPostorderIter returns a CommitIter that walks the commit +// history like WalkCommitHistory but in post-order. This means that after +// walking a merge commit, the merged commit will be walked before the base +// it was merged on. This can be useful if you wish to see the history in +// chronological order. Ignore allows to skip some commits from being iterated. 
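Before the post-order variant below, a usage sketch for the pre-order iterator defined above. `ForEach` treats `storer.ErrStop` as a clean early exit and returns nil, so it works well for bounded walks; `head` is assumed to be an already-resolved `*object.Commit`:

```go
package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

// PrintHistory walks the ancestry of head in pre-order and stops
// after the first n commits.
func PrintHistory(head *object.Commit, n int) error {
	iter := object.NewCommitPreorderIter(head, nil, nil)
	seen := 0
	return iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.Name)
		seen++
		if seen == n {
			return storer.ErrStop // swallowed by ForEach, returns nil
		}
		return nil
	})
}
```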
+func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + return &commitPostIterator{ + stack: []*Commit{c}, + seen: seen, + } +} + +func (w *commitPostIterator) Next() (*Commit, error) { + for { + if len(w.stack) == 0 { + return nil, io.EOF + } + + c := w.stack[len(w.stack)-1] + w.stack = w.stack[:len(w.stack)-1] + + if w.seen[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + return c, c.Parents().ForEach(func(p *Commit) error { + w.stack = append(w.stack, p) + return nil + }) + } +} + +func (w *commitPostIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *commitPostIterator) Close() {} + +// commitAllIterator stands for commit iterator for all refs. +type commitAllIterator struct { + // currCommit points to the current commit. + currCommit *list.Element +} + +// NewCommitAllIter returns a new commit iterator for all refs. +// repoStorer is a repo Storer used to get commits and references. +// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order +func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) { + commitsPath := list.New() + commitsLookup := make(map[plumbing.Hash]*list.Element) + head, err := storer.ResolveReference(repoStorer, plumbing.HEAD) + if err == nil { + err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup) + } + + if err != nil && err != plumbing.ErrReferenceNotFound { + return nil, err + } + + // add all references along with the HEAD + refIter, err := repoStorer.IterReferences() + if err != nil { + return nil, err + } + defer refIter.Close() + + for { + ref, err := refIter.Next() + if err == io.EOF { + break + } + + if err == plumbing.ErrReferenceNotFound { + continue + } + + if err != nil { + return nil, err + } + + if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil { + return nil, err + } + } + + return &commitAllIterator{commitsPath.Front()}, nil +} + +func addReference( + repoStorer storage.Storer, + commitIterFunc func(*Commit) CommitIter, + ref *plumbing.Reference, + commitsPath *list.List, + commitsLookup map[plumbing.Hash]*list.Element) error { + + _, exists := commitsLookup[ref.Hash()] + if exists { + // we already have it - skip the reference. + return nil + } + + refCommit, _ := GetCommit(repoStorer, ref.Hash()) + if refCommit == nil { + // if it's not a commit - skip it. 
+ return nil + } + + var ( + refCommits []*Commit + parent *list.Element + ) + // collect all ref commits to add + commitIter := commitIterFunc(refCommit) + for c, e := commitIter.Next(); e == nil; { + parent, exists = commitsLookup[c.Hash] + if exists { + break + } + refCommits = append(refCommits, c) + c, e = commitIter.Next() + } + commitIter.Close() + + if parent == nil { + // common parent - not found + // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet) + for _, c := range refCommits { + parent = commitsPath.PushBack(c) + commitsLookup[c.Hash] = parent + } + } else { + // add ref's commits to the path in reverse order (from the latest) + for i := len(refCommits) - 1; i >= 0; i-- { + c := refCommits[i] + // insert before found common parent + parent = commitsPath.InsertBefore(c, parent) + commitsLookup[c.Hash] = parent + } + } + + return nil +} + +func (it *commitAllIterator) Next() (*Commit, error) { + if it.currCommit == nil { + return nil, io.EOF + } + + c := it.currCommit.Value.(*Commit) + it.currCommit = it.currCommit.Next() + + return c, nil +} + +func (it *commitAllIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := it.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (it *commitAllIterator) Close() { + it.currCommit = nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go new file mode 100644 index 0000000000..8047fa9bc0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go @@ -0,0 +1,100 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type bfsCommitIterator struct { + seenExternal map[plumbing.Hash]bool + seen map[plumbing.Hash]bool + queue []*Commit +} + +// NewCommitIterBSF returns a CommitIter that walks the commit history, +// starting at the given commit and visiting its parents in pre-order. +// The given callback will be called for each visited commit. Each commit will +// be visited only once. If the callback returns an error, walking will stop +// and will return the error. Other errors might be returned if the history +// cannot be traversed (e.g. missing objects). Ignore allows to skip some +// commits from being iterated. 
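A usage sketch for the breadth-first iterator whose constructor follows (note the constructor keeps upstream's "BSF" spelling). The `ignore` list prunes those hashes, and anything reachable only through them, from the walk; `head` is assumed already resolved:

```go
package example

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// CollectBreadthFirst gathers commit hashes level by level, skipping
// any hash listed in ignore.
func CollectBreadthFirst(head *object.Commit, ignore []plumbing.Hash) ([]plumbing.Hash, error) {
	var out []plumbing.Hash
	iter := object.NewCommitIterBSF(head, nil, ignore)
	err := iter.ForEach(func(c *object.Commit) error {
		out = append(out, c.Hash)
		return nil
	})
	return out, err
}
```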
+func NewCommitIterBSF( + c *Commit, + seenExternal map[plumbing.Hash]bool, + ignore []plumbing.Hash, +) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + return &bfsCommitIterator{ + seenExternal: seenExternal, + seen: seen, + queue: []*Commit{c}, + } +} + +func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error { + if w.seen[h] || w.seenExternal[h] { + return nil + } + c, err := GetCommit(store, h) + if err != nil { + return err + } + w.queue = append(w.queue, c) + return nil +} + +func (w *bfsCommitIterator) Next() (*Commit, error) { + var c *Commit + for { + if len(w.queue) == 0 { + return nil, io.EOF + } + c = w.queue[0] + w.queue = w.queue[1:] + + if w.seen[c.Hash] || w.seenExternal[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + for _, h := range c.ParentHashes { + err := w.appendHash(c.s, h) + if err != nil { + return nil, err + } + } + + return c, nil + } +} + +func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *bfsCommitIterator) Close() {} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go new file mode 100644 index 0000000000..9d518133e2 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go @@ -0,0 +1,175 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +// NewFilterCommitIter returns a CommitIter that walks the commit history, +// starting at the passed commit and visiting its parents in Breadth-first order. +// The commits returned by the CommitIter will validate the passed CommitFilter. +// The history won't be transversed beyond a commit if isLimit is true for it. +// Each commit will be visited only once. +// If the commit history can not be traversed, or the Close() method is called, +// the CommitIter won't return more commits. +// If no isValid is passed, all ancestors of from commit will be valid. +// If no isLimit is limit, all ancestors of all commits will be visited. +func NewFilterCommitIter( + from *Commit, + isValid *CommitFilter, + isLimit *CommitFilter, +) CommitIter { + var validFilter CommitFilter + if isValid == nil { + validFilter = func(_ *Commit) bool { + return true + } + } else { + validFilter = *isValid + } + + var limitFilter CommitFilter + if isLimit == nil { + limitFilter = func(_ *Commit) bool { + return false + } + } else { + limitFilter = *isLimit + } + + return &filterCommitIter{ + isValid: validFilter, + isLimit: limitFilter, + visited: map[plumbing.Hash]struct{}{}, + queue: []*Commit{from}, + } +} + +// CommitFilter returns a boolean for the passed Commit +type CommitFilter func(*Commit) bool + +// filterCommitIter implements CommitIter +type filterCommitIter struct { + isValid CommitFilter + isLimit CommitFilter + visited map[plumbing.Hash]struct{} + queue []*Commit + lastErr error +} + +// Next returns the next commit of the CommitIter. +// It will return io.EOF if there are no more commits to visit, +// or an error if the history could not be traversed. 
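The `isValid`/`isLimit` pair described above separates what the iterator returns from where it stops descending. A sketch that surfaces only merge commits, with no traversal limit; `head` is again an assumed, already-resolved commit:

```go
package example

import (
	"github.com/go-git/go-git/v5/plumbing/object"
)

// MergesOnly returns an iterator over merge commits reachable from
// head. isValid selects what Next returns; a nil isLimit means the
// whole ancestry is traversed.
func MergesOnly(head *object.Commit) object.CommitIter {
	var isMerge object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1
	}
	return object.NewFilterCommitIter(head, &isMerge, nil)
}
```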
+func (w *filterCommitIter) Next() (*Commit, error) { + var commit *Commit + var err error + for { + commit, err = w.popNewFromQueue() + if err != nil { + return nil, w.close(err) + } + + w.visited[commit.Hash] = struct{}{} + + if !w.isLimit(commit) { + err = w.addToQueue(commit.s, commit.ParentHashes...) + if err != nil { + return nil, w.close(err) + } + } + + if w.isValid(commit) { + return commit, nil + } + } +} + +// ForEach runs the passed callback over each Commit returned by the CommitIter +// until the callback returns an error or there is no more commits to traverse. +func (w *filterCommitIter) ForEach(cb func(*Commit) error) error { + for { + commit, err := w.Next() + if err == io.EOF { + break + } + + if err != nil { + return err + } + + if err := cb(commit); err == storer.ErrStop { + break + } else if err != nil { + return err + } + } + + return nil +} + +// Error returns the error that caused that the CommitIter is no longer returning commits +func (w *filterCommitIter) Error() error { + return w.lastErr +} + +// Close closes the CommitIter +func (w *filterCommitIter) Close() { + w.visited = map[plumbing.Hash]struct{}{} + w.queue = []*Commit{} + w.isLimit = nil + w.isValid = nil +} + +// close closes the CommitIter with an error +func (w *filterCommitIter) close(err error) error { + w.Close() + w.lastErr = err + return err +} + +// popNewFromQueue returns the first new commit from the internal fifo queue, +// or an io.EOF error if the queue is empty +func (w *filterCommitIter) popNewFromQueue() (*Commit, error) { + var first *Commit + for { + if len(w.queue) == 0 { + if w.lastErr != nil { + return nil, w.lastErr + } + + return nil, io.EOF + } + + first = w.queue[0] + w.queue = w.queue[1:] + if _, ok := w.visited[first.Hash]; ok { + continue + } + + return first, nil + } +} + +// addToQueue adds the passed commits to the internal fifo queue if they weren't seen +// or returns an error if the passed hashes could not be used to get valid commits +func (w *filterCommitIter) addToQueue( + store storer.EncodedObjectStorer, + hashes ...plumbing.Hash, +) error { + for _, hash := range hashes { + if _, ok := w.visited[hash]; ok { + continue + } + + commit, err := GetCommit(store, hash) + if err != nil { + return err + } + + w.queue = append(w.queue, commit) + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go new file mode 100644 index 0000000000..fbddf1d238 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go @@ -0,0 +1,103 @@ +package object + +import ( + "io" + + "github.com/emirpasic/gods/trees/binaryheap" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type commitIteratorByCTime struct { + seenExternal map[plumbing.Hash]bool + seen map[plumbing.Hash]bool + heap *binaryheap.Heap +} + +// NewCommitIterCTime returns a CommitIter that walks the commit history, +// starting at the given commit and visiting its parents while preserving Committer Time order. +// this appears to be the closest order to `git log` +// The given callback will be called for each visited commit. Each commit will +// be visited only once. If the callback returns an error, walking will stop +// and will return the error. Other errors might be returned if the history +// cannot be traversed (e.g. missing objects). Ignore allows to skip some +// commits from being iterated. 
+func NewCommitIterCTime( + c *Commit, + seenExternal map[plumbing.Hash]bool, + ignore []plumbing.Hash, +) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + heap := binaryheap.NewWith(func(a, b interface{}) int { + if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) { + return 1 + } + return -1 + }) + heap.Push(c) + + return &commitIteratorByCTime{ + seenExternal: seenExternal, + seen: seen, + heap: heap, + } +} + +func (w *commitIteratorByCTime) Next() (*Commit, error) { + var c *Commit + for { + cIn, ok := w.heap.Pop() + if !ok { + return nil, io.EOF + } + c = cIn.(*Commit) + + if w.seen[c.Hash] || w.seenExternal[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + for _, h := range c.ParentHashes { + if w.seen[h] || w.seenExternal[h] { + continue + } + pc, err := GetCommit(c.s, h) + if err != nil { + return nil, err + } + w.heap.Push(pc) + } + + return c, nil + } +} + +func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *commitIteratorByCTime) Close() {} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go new file mode 100644 index 0000000000..ac56a71c41 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go @@ -0,0 +1,65 @@ +package object + +import ( + "io" + "time" + + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type commitLimitIter struct { + sourceIter CommitIter + limitOptions LogLimitOptions +} + +type LogLimitOptions struct { + Since *time.Time + Until *time.Time +} + +func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter { + iterator := new(commitLimitIter) + iterator.sourceIter = commitIter + iterator.limitOptions = limitOptions + return iterator +} + +func (c *commitLimitIter) Next() (*Commit, error) { + for { + commit, err := c.sourceIter.Next() + if err != nil { + return nil, err + } + + if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) { + continue + } + if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) { + continue + } + return commit, nil + } +} + +func (c *commitLimitIter) ForEach(cb func(*Commit) error) error { + for { + commit, nextErr := c.Next() + if nextErr == io.EOF { + break + } + if nextErr != nil { + return nextErr + } + err := cb(commit) + if err == storer.ErrStop { + return nil + } else if err != nil { + return err + } + } + return nil +} + +func (c *commitLimitIter) Close() { + c.sourceIter.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go new file mode 100644 index 0000000000..aa0ca15fd0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go @@ -0,0 +1,160 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type commitPathIter struct { + pathFilter func(string) bool + sourceIter CommitIter + currentCommit *Commit + checkParent bool +} + +// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between +// successive trees 
returned from the commit iterator from the argument. The purpose of this is +// to find the commits that explain how the files that match the path came to be. +// If checkParent is true then the function double checks if potential parent (next commit in a path) +// is one of the parents in the tree (it's used by `git log --all`). +// pathFilter is a function that takes path of file as argument and returns true if we want it +func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter { + iterator := new(commitPathIter) + iterator.sourceIter = commitIter + iterator.pathFilter = pathFilter + iterator.checkParent = checkParent + return iterator +} + +// NewCommitFileIterFromIter is kept for compatibility, can be replaced with NewCommitPathIterFromIter +func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter { + return NewCommitPathIterFromIter( + func(path string) bool { + return path == fileName + }, + commitIter, + checkParent, + ) +} + +func (c *commitPathIter) Next() (*Commit, error) { + if c.currentCommit == nil { + var err error + c.currentCommit, err = c.sourceIter.Next() + if err != nil { + return nil, err + } + } + commit, commitErr := c.getNextFileCommit() + + // Setting current-commit to nil to prevent unwanted states when errors are raised + if commitErr != nil { + c.currentCommit = nil + } + return commit, commitErr +} + +func (c *commitPathIter) getNextFileCommit() (*Commit, error) { + for { + // Parent-commit can be nil if the current-commit is the initial commit + parentCommit, parentCommitErr := c.sourceIter.Next() + if parentCommitErr != nil { + // If the parent-commit is beyond the initial commit, keep it nil + if parentCommitErr != io.EOF { + return nil, parentCommitErr + } + parentCommit = nil + } + + // Fetch the trees of the current and parent commits + currentTree, currTreeErr := c.currentCommit.Tree() + if currTreeErr != nil { + return nil, currTreeErr + } + + var parentTree *Tree + if parentCommit != nil { + var parentTreeErr error + parentTree, parentTreeErr = parentCommit.Tree() + if parentTreeErr != nil { + return nil, parentTreeErr + } + } + + // Find diff between current and parent trees + changes, diffErr := DiffTree(currentTree, parentTree) + if diffErr != nil { + return nil, diffErr + } + + found := c.hasFileChange(changes, parentCommit) + + // Storing the current-commit in-case a change is found, and + // Updating the current-commit for the next-iteration + prevCommit := c.currentCommit + c.currentCommit = parentCommit + + if found { + return prevCommit, nil + } + + // If not matches found and if parent-commit is beyond the initial commit, then return with EOF + if parentCommit == nil { + return nil, io.EOF + } + } +} + +func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { + for _, change := range changes { + if !c.pathFilter(change.name()) { + continue + } + + // filename matches, now check if source iterator contains all commits (from all refs) + if c.checkParent { + if parent != nil && isParentHash(parent.Hash, c.currentCommit) { + return true + } + continue + } + + return true + } + + return false +} + +func isParentHash(hash plumbing.Hash, commit *Commit) bool { + for _, h := range commit.ParentHashes { + if h == hash { + return true + } + } + return false +} + +func (c *commitPathIter) ForEach(cb func(*Commit) error) error { + for { + commit, nextErr := c.Next() + if nextErr == io.EOF { + break + } + if nextErr != nil { + return 
nextErr + } + err := cb(commit) + if err == storer.ErrStop { + return nil + } else if err != nil { + return err + } + } + return nil +} + +func (c *commitPathIter) Close() { + c.sourceIter.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go new file mode 100644 index 0000000000..3591f5f0a6 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go @@ -0,0 +1,12 @@ +package object + +import ( + "bufio" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewReader(nil) + }, +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go new file mode 100644 index 0000000000..7c2222702c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go @@ -0,0 +1,98 @@ +package object + +import ( + "bytes" + "context" + + "github.com/go-git/go-git/v5/utils/merkletrie" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// DiffTree compares the content and mode of the blobs found via two +// tree objects. +// DiffTree does not perform rename detection, use DiffTreeWithOptions +// instead to detect renames. +func DiffTree(a, b *Tree) (Changes, error) { + return DiffTreeContext(context.Background(), a, b) +} + +// DiffTreeContext compares the content and mode of the blobs found via two +// tree objects. Provided context must be non-nil. +// An error will be returned if context expires. +func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) { + return DiffTreeWithOptions(ctx, a, b, nil) +} + +// DiffTreeOptions are the configurable options when performing a diff tree. +type DiffTreeOptions struct { + // DetectRenames is whether the diff tree will use rename detection. + DetectRenames bool + // RenameScore is the threshold to of similarity between files to consider + // that a pair of delete and insert are a rename. The number must be + // exactly between 0 and 100. + RenameScore uint + // RenameLimit is the maximum amount of files that can be compared when + // detecting renames. The number of comparisons that have to be performed + // is equal to the number of deleted files * the number of added files. + // That means, that if 100 files were deleted and 50 files were added, 5000 + // file comparisons may be needed. So, if the rename limit is 50, the number + // of both deleted and added needs to be equal or less than 50. + // A value of 0 means no limit. + RenameLimit uint + // OnlyExactRenames performs only detection of exact renames and will not perform + // any detection of renames based on file similarity. + OnlyExactRenames bool +} + +// DefaultDiffTreeOptions are the default and recommended options for the +// diff tree. +var DefaultDiffTreeOptions = &DiffTreeOptions{ + DetectRenames: true, + RenameScore: 60, + RenameLimit: 0, + OnlyExactRenames: false, +} + +// DiffTreeWithOptions compares the content and mode of the blobs found +// via two tree objects with the given options. The provided context +// must be non-nil. +// If no options are passed, no rename detection will be performed. The +// recommended options are DefaultDiffTreeOptions. +// An error will be returned if the context expires. +// This function will be deprecated and removed in v6 so the default +// behaviour of DiffTree is to detect renames. 
+func DiffTreeWithOptions( + ctx context.Context, + a, b *Tree, + opts *DiffTreeOptions, +) (Changes, error) { + from := NewTreeRootNode(a) + to := NewTreeRootNode(b) + + hashEqual := func(a, b noder.Hasher) bool { + return bytes.Equal(a.Hash(), b.Hash()) + } + + merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual) + if err != nil { + if err == merkletrie.ErrCanceled { + return nil, ErrCanceled + } + return nil, err + } + + changes, err := newChanges(merkletrieChanges) + if err != nil { + return nil, err + } + + if opts == nil { + opts = new(DiffTreeOptions) + } + + if opts.DetectRenames { + return DetectRenames(changes, opts) + } + + return changes, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go new file mode 100644 index 0000000000..6cc5367d8d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go @@ -0,0 +1,137 @@ +package object + +import ( + "bytes" + "io" + "strings" + + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/binary" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// File represents git file objects. +type File struct { + // Name is the path of the file. It might be relative to a tree, + // depending of the function that generates it. + Name string + // Mode is the file mode. + Mode filemode.FileMode + // Blob with the contents of the file. + Blob +} + +// NewFile returns a File based on the given blob object +func NewFile(name string, m filemode.FileMode, b *Blob) *File { + return &File{Name: name, Mode: m, Blob: *b} +} + +// Contents returns the contents of a file as a string. +func (f *File) Contents() (content string, err error) { + reader, err := f.Reader() + if err != nil { + return "", err + } + defer ioutil.CheckClose(reader, &err) + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(reader); err != nil { + return "", err + } + + return buf.String(), nil +} + +// IsBinary returns if the file is binary or not +func (f *File) IsBinary() (bin bool, err error) { + reader, err := f.Reader() + if err != nil { + return false, err + } + defer ioutil.CheckClose(reader, &err) + + return binary.IsBinary(reader) +} + +// Lines returns a slice of lines from the contents of a file, stripping +// all end of line characters. If the last line is empty (does not end +// in an end of line), it is also stripped. +func (f *File) Lines() ([]string, error) { + content, err := f.Contents() + if err != nil { + return nil, err + } + + splits := strings.Split(content, "\n") + // remove the last line if it is empty + if splits[len(splits)-1] == "" { + return splits[:len(splits)-1], nil + } + + return splits, nil +} + +// FileIter provides an iterator for the files in a tree. +type FileIter struct { + s storer.EncodedObjectStorer + w TreeWalker +} + +// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a +// *FileIter that iterates over all files contained in the tree, recursively. +func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter { + return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)} +} + +// Next moves the iterator to the next file and returns a pointer to it. If +// there are no more files, it returns io.EOF. 
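The `File` helpers above compose naturally with `Commit.File` from earlier in this diff: look the path up in the commit's tree, guard against binary content, then print it line by line. A hedged sketch assuming a resolved `*object.Commit`:

```go
package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// PrintFile looks path up in the commit's tree and prints it line by
// line; ErrFileNotFound is returned when the path does not exist.
func PrintFile(c *object.Commit, path string) error {
	f, err := c.File(path)
	if err != nil {
		return err
	}

	bin, err := f.IsBinary()
	if err != nil {
		return err
	}
	if bin {
		return fmt.Errorf("%s is binary", path)
	}

	lines, err := f.Lines()
	if err != nil {
		return err
	}
	for i, line := range lines {
		fmt.Printf("%4d %s\n", i+1, line)
	}
	return nil
}
```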
+func (iter *FileIter) Next() (*File, error) { + for { + name, entry, err := iter.w.Next() + if err != nil { + return nil, err + } + + if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule { + continue + } + + blob, err := GetBlob(iter.s, entry.Hash) + if err != nil { + return nil, err + } + + return NewFile(name, entry.Mode, blob), nil + } +} + +// ForEach call the cb function for each file contained in this iter until +// an error happens or the end of the iter is reached. If plumbing.ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *FileIter) ForEach(cb func(*File) error) error { + defer iter.Close() + + for { + f, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + if err := cb(f); err != nil { + if err == storer.ErrStop { + return nil + } + + return err + } + } +} + +func (iter *FileIter) Close() { + iter.w.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go new file mode 100644 index 0000000000..b412361d02 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go @@ -0,0 +1,210 @@ +package object + +import ( + "fmt" + "sort" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +// errIsReachable is thrown when first commit is an ancestor of the second +var errIsReachable = fmt.Errorf("first is reachable from second") + +// MergeBase mimics the behavior of `git merge-base actual other`, returning the +// best common ancestor between the actual and the passed one. +// The best common ancestors can not be reached from other common ancestors. +func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) { + // use sortedByCommitDateDesc strategy + sorted := sortByCommitDateDesc(c, other) + newer := sorted[0] + older := sorted[1] + + newerHistory, err := ancestorsIndex(older, newer) + if err == errIsReachable { + return []*Commit{older}, nil + } + + if err != nil { + return nil, err + } + + var res []*Commit + inNewerHistory := isInIndexCommitFilter(newerHistory) + resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory) + _ = resIter.ForEach(func(commit *Commit) error { + res = append(res, commit) + return nil + }) + + return Independents(res) +} + +// IsAncestor returns true if the actual commit is ancestor of the passed one. +// It returns an error if the history is not transversable +// It mimics the behavior of `git merge --is-ancestor actual other` +func (c *Commit) IsAncestor(other *Commit) (bool, error) { + found := false + iter := NewCommitPreorderIter(other, nil, nil) + err := iter.ForEach(func(comm *Commit) error { + if comm.Hash != c.Hash { + return nil + } + + found = true + return storer.ErrStop + }) + + return found, err +} + +// ancestorsIndex returns a map with the ancestors of the starting commit if the +// excluded one is not one of them. It returns errIsReachable if the excluded commit +// is ancestor of the starting, or another error if the history is not traversable. 
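`MergeBase` and `IsAncestor` above mirror the `git merge-base` family. A sketch assuming two already-resolved branch-tip commits:

```go
package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// Report prints the relationship between two branch tips, mirroring
// `git merge-base` and `git merge-base --is-ancestor`.
func Report(a, b *object.Commit) error {
	ok, err := a.IsAncestor(b)
	if err != nil {
		return err
	}
	if ok {
		fmt.Println("fast-forward possible:", a.Hash, "is an ancestor of", b.Hash)
		return nil
	}

	bases, err := a.MergeBase(b)
	if err != nil {
		return err
	}
	for _, base := range bases {
		fmt.Println("merge base:", base.Hash)
	}
	return nil
}
```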
+func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
+	if excluded.Hash.String() == starting.Hash.String() {
+		return nil, errIsReachable
+	}
+
+	startingHistory := map[plumbing.Hash]struct{}{}
+	startingIter := NewCommitIterBSF(starting, nil, nil)
+	err := startingIter.ForEach(func(commit *Commit) error {
+		if commit.Hash == excluded.Hash {
+			return errIsReachable
+		}
+
+		startingHistory[commit.Hash] = struct{}{}
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return startingHistory, nil
+}
+
+// Independents returns the subset of the passed commits that are not reachable
+// from the others.
+// It mimics the behavior of `git merge-base --independent commit...`.
+func Independents(commits []*Commit) ([]*Commit, error) {
+	// use sortByCommitDateDesc strategy
+	candidates := sortByCommitDateDesc(commits...)
+	candidates = removeDuplicated(candidates)
+
+	seen := map[plumbing.Hash]struct{}{}
+	var isLimit CommitFilter = func(commit *Commit) bool {
+		_, ok := seen[commit.Hash]
+		return ok
+	}
+
+	if len(candidates) < 2 {
+		return candidates, nil
+	}
+
+	pos := 0
+	for {
+		from := candidates[pos]
+		others := remove(candidates, from)
+		fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
+		err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
+			for _, other := range others {
+				if fromAncestor.Hash == other.Hash {
+					candidates = remove(candidates, other)
+					others = remove(others, other)
+				}
+			}
+
+			if len(candidates) == 1 {
+				return storer.ErrStop
+			}
+
+			seen[fromAncestor.Hash] = struct{}{}
+			return nil
+		})
+
+		if err != nil {
+			return nil, err
+		}
+
+		nextPos := indexOf(candidates, from) + 1
+		if nextPos >= len(candidates) {
+			break
+		}
+
+		pos = nextPos
+	}
+
+	return candidates, nil
+}
+
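`Independents` above can be driven directly; given several branch tips it discards any tip already contained in another tip's history, exactly like `git merge-base --independent`. A sketch assuming resolved commits:

```go
package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// Tips reduces a set of branch-tip commits to the ones that are not
// reachable from any of the others.
func Tips(candidates []*object.Commit) error {
	independent, err := object.Independents(candidates)
	if err != nil {
		return err
	}
	for _, c := range independent {
		fmt.Println("independent tip:", c.Hash)
	}
	return nil
}
```

+// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
+//
+// This ordering tries to reduce the time needed to walk the history from one
+// commit to reach the others, on the assumption that ancestors tend to be
+// committed before their descendants. That way `Independents(A^, A)` is
+// processed as `Independents(A, A^)`: starting from `A`, `A^` is reached much
+// sooner than by walking from `A^` back to the initial commit and then from
+// `A` to `A^`.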
+func sortByCommitDateDesc(commits ...*Commit) []*Commit { + sorted := make([]*Commit, len(commits)) + copy(sorted, commits) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].Committer.When.After(sorted[j].Committer.When) + }) + + return sorted +} + +// indexOf returns the first position where target was found in the passed commits +func indexOf(commits []*Commit, target *Commit) int { + for i, commit := range commits { + if target.Hash == commit.Hash { + return i + } + } + + return -1 +} + +// remove returns the passed commits excluding the commit toDelete +func remove(commits []*Commit, toDelete *Commit) []*Commit { + res := make([]*Commit, len(commits)) + j := 0 + for _, commit := range commits { + if commit.Hash == toDelete.Hash { + continue + } + + res[j] = commit + j++ + } + + return res[:j] +} + +// removeDuplicated removes duplicated commits from the passed slice of commits +func removeDuplicated(commits []*Commit) []*Commit { + seen := make(map[plumbing.Hash]struct{}, len(commits)) + res := make([]*Commit, len(commits)) + j := 0 + for _, commit := range commits { + if _, ok := seen[commit.Hash]; ok { + continue + } + + seen[commit.Hash] = struct{}{} + res[j] = commit + j++ + } + + return res[:j] +} + +// isInIndexCommitFilter returns a commitFilter that returns true +// if the commit is in the passed index. +func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter { + return func(c *Commit) bool { + _, ok := index[c.Hash] + return ok + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go new file mode 100644 index 0000000000..13b1e91c9c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go @@ -0,0 +1,239 @@ +// Package object contains implementations of all Git objects and utility +// functions to work with them. +package object + +import ( + "bytes" + "errors" + "fmt" + "io" + "strconv" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +// ErrUnsupportedObject trigger when a non-supported object is being decoded. +var ErrUnsupportedObject = errors.New("unsupported object type") + +// Object is a generic representation of any git object. It is implemented by +// Commit, Tree, Blob, and Tag, and includes the functions that are common to +// them. +// +// Object is returned when an object can be of any type. It is frequently used +// with a type cast to acquire the specific type of object: +// +// func process(obj Object) { +// switch o := obj.(type) { +// case *Commit: +// // o is a Commit +// case *Tree: +// // o is a Tree +// case *Blob: +// // o is a Blob +// case *Tag: +// // o is a Tag +// } +// } +// +// This interface is intentionally different from plumbing.EncodedObject, which +// is a lower level interface used by storage implementations to read and write +// objects in its encoded form. +type Object interface { + ID() plumbing.Hash + Type() plumbing.ObjectType + Decode(plumbing.EncodedObject) error + Encode(plumbing.EncodedObject) error +} + +// GetObject gets an object from an object storer and decodes it. +func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) { + o, err := s.EncodedObject(plumbing.AnyObject, h) + if err != nil { + return nil, err + } + + return DecodeObject(s, o) +} + +// DecodeObject decodes an encoded object into an Object and associates it to +// the given object storer. 
+func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) { + switch o.Type() { + case plumbing.CommitObject: + return DecodeCommit(s, o) + case plumbing.TreeObject: + return DecodeTree(s, o) + case plumbing.BlobObject: + return DecodeBlob(o) + case plumbing.TagObject: + return DecodeTag(s, o) + default: + return nil, plumbing.ErrInvalidType + } +} + +// DateFormat is the format being used in the original git implementation +const DateFormat = "Mon Jan 02 15:04:05 2006 -0700" + +// Signature is used to identify who and when created a commit or tag. +type Signature struct { + // Name represents a person name. It is an arbitrary string. + Name string + // Email is an email, but it cannot be assumed to be well-formed. + Email string + // When is the timestamp of the signature. + When time.Time +} + +// Decode decodes a byte slice into a signature +func (s *Signature) Decode(b []byte) { + open := bytes.LastIndexByte(b, '<') + close := bytes.LastIndexByte(b, '>') + if open == -1 || close == -1 { + return + } + + if close < open { + return + } + + s.Name = string(bytes.Trim(b[:open], " ")) + s.Email = string(b[open+1 : close]) + + hasTime := close+2 < len(b) + if hasTime { + s.decodeTimeAndTimeZone(b[close+2:]) + } +} + +// Encode encodes a Signature into a writer. +func (s *Signature) Encode(w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil { + return err + } + if err := s.encodeTimeAndTimeZone(w); err != nil { + return err + } + return nil +} + +var timeZoneLength = 5 + +func (s *Signature) decodeTimeAndTimeZone(b []byte) { + space := bytes.IndexByte(b, ' ') + if space == -1 { + space = len(b) + } + + ts, err := strconv.ParseInt(string(b[:space]), 10, 64) + if err != nil { + return + } + + s.When = time.Unix(ts, 0).In(time.UTC) + var tzStart = space + 1 + if tzStart >= len(b) || tzStart+timeZoneLength > len(b) { + return + } + + timezone := string(b[tzStart : tzStart+timeZoneLength]) + tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64) + tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64) + if err1 != nil || err2 != nil { + return + } + if tzhours < 0 { + tzmins *= -1 + } + + tz := time.FixedZone("", int(tzhours*60*60+tzmins*60)) + + s.When = s.When.In(tz) +} + +func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error { + u := s.When.Unix() + if u < 0 { + u = 0 + } + _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700")) + return err +} + +func (s *Signature) String() string { + return fmt.Sprintf("%s <%s>", s.Name, s.Email) +} + +// ObjectIter provides an iterator for a set of objects. +type ObjectIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewObjectIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all +// objects contained in the storer.EncodedObjectIter. +func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter { + return &ObjectIter{iter, s} +} + +// Next moves the iterator to the next object and returns a pointer to it. If +// there are no more objects, it returns io.EOF. 
+func (iter *ObjectIter) Next() (Object, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + o, err := iter.toObject(obj) + if err == plumbing.ErrInvalidType { + continue + } + + if err != nil { + return nil, err + } + + return o, nil + } +} + +// ForEach call the cb function for each object contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *ObjectIter) ForEach(cb func(Object) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + o, err := iter.toObject(obj) + if err == plumbing.ErrInvalidType { + return nil + } + + if err != nil { + return err + } + + return cb(o) + }) +} + +func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) { + switch obj.Type() { + case plumbing.BlobObject: + blob := &Blob{} + return blob, blob.Decode(obj) + case plumbing.TreeObject: + tree := &Tree{s: iter.s} + return tree, tree.Decode(obj) + case plumbing.CommitObject: + commit := &Commit{} + return commit, commit.Decode(obj) + case plumbing.TagObject: + tag := &Tag{} + return tag, tag.Decode(obj) + default: + return nil, plumbing.ErrInvalidType + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go new file mode 100644 index 0000000000..9b5f438c02 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go @@ -0,0 +1,346 @@ +package object + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + fdiff "github.com/go-git/go-git/v5/plumbing/format/diff" + "github.com/go-git/go-git/v5/utils/diff" + + dmp "github.com/sergi/go-diff/diffmatchpatch" +) + +var ( + ErrCanceled = errors.New("operation canceled") +) + +func getPatch(message string, changes ...*Change) (*Patch, error) { + ctx := context.Background() + return getPatchContext(ctx, message, changes...) 
+}
+
+func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
+	var filePatches []fdiff.FilePatch
+	for _, c := range changes {
+		select {
+		case <-ctx.Done():
+			return nil, ErrCanceled
+		default:
+		}
+
+		fp, err := filePatchWithContext(ctx, c)
+		if err != nil {
+			return nil, err
+		}
+
+		filePatches = append(filePatches, fp)
+	}
+
+	return &Patch{message, filePatches}, nil
+}
+
+func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
+	from, to, err := c.Files()
+	if err != nil {
+		return nil, err
+	}
+	fromContent, fIsBinary, err := fileContent(from)
+	if err != nil {
+		return nil, err
+	}
+
+	toContent, tIsBinary, err := fileContent(to)
+	if err != nil {
+		return nil, err
+	}
+
+	if fIsBinary || tIsBinary {
+		return &textFilePatch{from: c.From, to: c.To}, nil
+	}
+
+	diffs := diff.Do(fromContent, toContent)
+
+	var chunks []fdiff.Chunk
+	for _, d := range diffs {
+		select {
+		case <-ctx.Done():
+			return nil, ErrCanceled
+		default:
+		}
+
+		var op fdiff.Operation
+		switch d.Type {
+		case dmp.DiffEqual:
+			op = fdiff.Equal
+		case dmp.DiffDelete:
+			op = fdiff.Delete
+		case dmp.DiffInsert:
+			op = fdiff.Add
+		}
+
+		chunks = append(chunks, &textChunk{d.Text, op})
+	}
+
+	return &textFilePatch{
+		chunks: chunks,
+		from:   c.From,
+		to:     c.To,
+	}, nil
+}
+
+func filePatch(c *Change) (fdiff.FilePatch, error) {
+	return filePatchWithContext(context.Background(), c)
+}
+
+func fileContent(f *File) (content string, isBinary bool, err error) {
+	if f == nil {
+		return
+	}
+
+	isBinary, err = f.IsBinary()
+	if err != nil || isBinary {
+		return
+	}
+
+	content, err = f.Contents()
+
+	return
+}
+
+// Patch is an implementation of fdiff.Patch interface
+type Patch struct {
+	message     string
+	filePatches []fdiff.FilePatch
+}
+
+func (p *Patch) FilePatches() []fdiff.FilePatch {
+	return p.filePatches
+}
+
+func (p *Patch) Message() string {
+	return p.message
+}
+
+func (p *Patch) Encode(w io.Writer) error {
+	ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines)
+
+	return ue.Encode(p)
+}
+
+func (p *Patch) Stats() FileStats {
+	return getFileStatsFromFilePatches(p.FilePatches())
+}
+
+func (p *Patch) String() string {
+	buf := bytes.NewBuffer(nil)
+	err := p.Encode(buf)
+	if err != nil {
+		return fmt.Sprintf("malformed patch: %s", err.Error())
+	}
+
+	return buf.String()
+}
+
+// changeEntryWrapper is an implementation of fdiff.File interface
+type changeEntryWrapper struct {
+	ce ChangeEntry
+}
+
+func (f *changeEntryWrapper) Hash() plumbing.Hash {
+	if !f.ce.TreeEntry.Mode.IsFile() {
+		return plumbing.ZeroHash
+	}
+
+	return f.ce.TreeEntry.Hash
+}
+
+func (f *changeEntryWrapper) Mode() filemode.FileMode {
+	return f.ce.TreeEntry.Mode
+}
+func (f *changeEntryWrapper) Path() string {
+	if !f.ce.TreeEntry.Mode.IsFile() {
+		return ""
+	}
+
+	return f.ce.Name
+}
+
+func (f *changeEntryWrapper) Empty() bool {
+	return !f.ce.TreeEntry.Mode.IsFile()
+}
+
+// textFilePatch is an implementation of fdiff.FilePatch interface
+type textFilePatch struct {
+	chunks   []fdiff.Chunk
+	from, to ChangeEntry
+}
+
+func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) {
+	f := &changeEntryWrapper{tf.from}
+	t := &changeEntryWrapper{tf.to}
+
+	if !f.Empty() {
+		from = f
+	}
+
+	if !t.Empty() {
+		to = t
+	}
+
+	return
+}
+
+func (tf *textFilePatch) IsBinary() bool {
+	return len(tf.chunks) == 0
+}
+
+func (tf *textFilePatch) Chunks() []fdiff.Chunk {
+	return tf.chunks
+}
+
+// textChunk is an implementation of fdiff.Chunk interface
+type textChunk struct {
+	content string
+	op      fdiff.Operation
+}
+
+func (t *textChunk) Content() string {
+	return t.content
+}
+
+func (t *textChunk) Type() fdiff.Operation {
+	return t.op
+}
+
+// FileStat stores the status of changes in content of a file.
+type FileStat struct {
+	Name     string
+	Addition int
+	Deletion int
+}
+
+func (fs FileStat) String() string {
+	return printStat([]FileStat{fs})
+}
+
+// FileStats is a collection of FileStat.
+type FileStats []FileStat
+
+func (fileStats FileStats) String() string {
+	return printStat(fileStats)
+}
+
+func printStat(fileStats []FileStat) string {
+	padLength := float64(len(" "))
+	newlineLength := float64(len("\n"))
+	separatorLength := float64(len("|"))
+	// Soft line length limit. The text length calculation below excludes
+	// length of the change number. Adding that would take it closer to 80,
+	// but probably not more than 80, until it's a huge number.
+	lineLength := 72.0
+
+	// Get the longest filename and longest total change.
+	var longestLength float64
+	var longestTotalChange float64
+	for _, fs := range fileStats {
+		if int(longestLength) < len(fs.Name) {
+			longestLength = float64(len(fs.Name))
+		}
+		totalChange := fs.Addition + fs.Deletion
+		if int(longestTotalChange) < totalChange {
+			longestTotalChange = float64(totalChange)
+		}
+	}
+
+	// Parts of the output:
+	// <pad><filename><pad>|<pad><+++/--->
+	// example: " main.go | 10 +++++++--- "
+
+	// <pad><filename><pad>
+	leftTextLength := padLength + longestLength + padLength
+
+	// <pad><+++++/----->
+	// Excluding number length here.
+	rightTextLength := padLength + padLength + newlineLength
+
+	totalTextArea := leftTextLength + separatorLength + rightTextLength
+	heightOfHistogram := lineLength - totalTextArea
+
+	// Scale the histogram.
+	var scaleFactor float64
+	if longestTotalChange > heightOfHistogram {
+		// Scale down to heightOfHistogram.
+		scaleFactor = longestTotalChange / heightOfHistogram
+	} else {
+		scaleFactor = 1.0
+	}
+
+	finalOutput := ""
+	for _, fs := range fileStats {
+		addn := float64(fs.Addition)
+		deln := float64(fs.Deletion)
+		adds := strings.Repeat("+", int(math.Floor(addn/scaleFactor)))
+		dels := strings.Repeat("-", int(math.Floor(deln/scaleFactor)))
+		finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels)
+	}
+
+	return finalOutput
+}
+
+func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats {
+	var fileStats FileStats
+
+	for _, fp := range filePatches {
+		// ignore empty patches (binary files, submodule refs updates)
+		if len(fp.Chunks()) == 0 {
+			continue
+		}
+
+		cs := FileStat{}
+		from, to := fp.Files()
+		if from == nil {
+			// New File is created.
+			cs.Name = to.Path()
+		} else if to == nil {
+			// File is deleted.
+			cs.Name = from.Path()
+		} else if from.Path() != to.Path() {
+			// File is renamed. Not supported.
+			// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
+		} else {
+			cs.Name = from.Path()
+		}
+
+		for _, chunk := range fp.Chunks() {
+			s := chunk.Content()
+			if len(s) == 0 {
+				continue
+			}
+
+			switch chunk.Type() {
+			case fdiff.Add:
+				cs.Addition += strings.Count(s, "\n")
+				if s[len(s)-1] != '\n' {
+					cs.Addition++
+				}
+			case fdiff.Delete:
+				cs.Deletion += strings.Count(s, "\n")
+				if s[len(s)-1] != '\n' {
+					cs.Deletion++
+				}
+			}
+		}
+
+		fileStats = append(fileStats, cs)
+	}
+
+	return fileStats
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go
new file mode 100644
index 0000000000..7fed72c2f5
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go
@@ -0,0 +1,813 @@
+package object
+
+import (
+	"errors"
+	"io"
+	"sort"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/filemode"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+	"github.com/go-git/go-git/v5/utils/merkletrie"
+)
+
+// DetectRenames detects the renames in the given changes on two trees with
+// the given options. It will return the given changes grouping additions and
+// deletions into modifications when possible.
+// If options is nil, the default diff tree options will be used.
+func DetectRenames(
+	changes Changes,
+	opts *DiffTreeOptions,
+) (Changes, error) {
+	if opts == nil {
+		opts = DefaultDiffTreeOptions
+	}
+
+	detector := &renameDetector{
+		renameScore: int(opts.RenameScore),
+		renameLimit: int(opts.RenameLimit),
+		onlyExact:   opts.OnlyExactRenames,
+	}
+
+	for _, c := range changes {
+		action, err := c.Action()
+		if err != nil {
+			return nil, err
+		}
+
+		switch action {
+		case merkletrie.Insert:
+			detector.added = append(detector.added, c)
+		case merkletrie.Delete:
+			detector.deleted = append(detector.deleted, c)
+		default:
+			detector.modified = append(detector.modified, c)
+		}
+	}
+
+	return detector.detect()
+}
+
+// renameDetector will detect and resolve renames in a set of changes.
+// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/RenameDetector.java
+type renameDetector struct {
+	added    []*Change
+	deleted  []*Change
+	modified []*Change
+
+	renameScore int
+	renameLimit int
+	onlyExact   bool
+}
+
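For illustration only (not part of the vendored file): a sketch of requesting rename detection when diffing two trees through the exported API, assuming *object.Tree values obtained elsewhere and the DiffTreeOptions fields referenced above (DetectRenames, RenameScore).

package example

import (
	"context"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// renamesBetween diffs two trees and groups delete/add pairs into renames
// when their similarity reaches RenameScore.
func renamesBetween(from, to *object.Tree) (object.Changes, error) {
	opts := &object.DiffTreeOptions{
		DetectRenames: true,
		RenameScore:   60, // minimum similarity (0-100) needed to report a rename
	}
	return object.DiffTreeWithOptions(context.Background(), from, to, opts)
}

+// detectExactRenames matches files that were deleted with files that were
+// added where the hash is the same on both. If there are multiple targets
+// the one with the most similar path will be chosen as the rename and the
+// rest as either deletions or additions.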
+func (d *renameDetector) detectExactRenames() { + added := groupChangesByHash(d.added) + deletes := groupChangesByHash(d.deleted) + var uniqueAdds []*Change + var nonUniqueAdds [][]*Change + var addedLeft []*Change + + for _, cs := range added { + if len(cs) == 1 { + uniqueAdds = append(uniqueAdds, cs[0]) + } else { + nonUniqueAdds = append(nonUniqueAdds, cs) + } + } + + for _, c := range uniqueAdds { + hash := changeHash(c) + deleted := deletes[hash] + + if len(deleted) == 1 { + if sameMode(c, deleted[0]) { + d.modified = append(d.modified, &Change{From: deleted[0].From, To: c.To}) + delete(deletes, hash) + } else { + addedLeft = append(addedLeft, c) + } + } else if len(deleted) > 1 { + bestMatch := bestNameMatch(c, deleted) + if bestMatch != nil && sameMode(c, bestMatch) { + d.modified = append(d.modified, &Change{From: bestMatch.From, To: c.To}) + delete(deletes, hash) + + var newDeletes = make([]*Change, 0, len(deleted)-1) + for _, d := range deleted { + if d != bestMatch { + newDeletes = append(newDeletes, d) + } + } + deletes[hash] = newDeletes + } + } else { + addedLeft = append(addedLeft, c) + } + } + + for _, added := range nonUniqueAdds { + hash := changeHash(added[0]) + deleted := deletes[hash] + + if len(deleted) == 1 { + deleted := deleted[0] + bestMatch := bestNameMatch(deleted, added) + if bestMatch != nil && sameMode(deleted, bestMatch) { + d.modified = append(d.modified, &Change{From: deleted.From, To: bestMatch.To}) + delete(deletes, hash) + + for _, c := range added { + if c != bestMatch { + addedLeft = append(addedLeft, c) + } + } + } else { + addedLeft = append(addedLeft, added...) + } + } else if len(deleted) > 1 { + maxSize := len(deleted) * len(added) + if d.renameLimit > 0 && d.renameLimit < maxSize { + maxSize = d.renameLimit + } + + matrix := make(similarityMatrix, 0, maxSize) + + for delIdx, del := range deleted { + deletedName := changeName(del) + + for addIdx, add := range added { + addedName := changeName(add) + + score := nameSimilarityScore(addedName, deletedName) + matrix = append(matrix, similarityPair{added: addIdx, deleted: delIdx, score: score}) + + if len(matrix) >= maxSize { + break + } + } + + if len(matrix) >= maxSize { + break + } + } + + sort.Stable(matrix) + + usedAdds := make(map[*Change]struct{}) + usedDeletes := make(map[*Change]struct{}) + for i := len(matrix) - 1; i >= 0; i-- { + del := deleted[matrix[i].deleted] + add := added[matrix[i].added] + + if add == nil || del == nil { + // it was already matched + continue + } + + usedAdds[add] = struct{}{} + usedDeletes[del] = struct{}{} + d.modified = append(d.modified, &Change{From: del.From, To: add.To}) + added[matrix[i].added] = nil + deleted[matrix[i].deleted] = nil + } + + for _, c := range added { + if _, ok := usedAdds[c]; !ok && c != nil { + addedLeft = append(addedLeft, c) + } + } + + var newDeletes = make([]*Change, 0, len(deleted)-len(usedDeletes)) + for _, c := range deleted { + if _, ok := usedDeletes[c]; !ok && c != nil { + newDeletes = append(newDeletes, c) + } + } + deletes[hash] = newDeletes + } else { + addedLeft = append(addedLeft, added...) + } + } + + d.added = addedLeft + d.deleted = nil + for _, dels := range deletes { + d.deleted = append(d.deleted, dels...) + } +} + +// detectContentRenames detects renames based on the similarity of the content +// in the files by building a matrix of pairs between sources and destinations +// and matching by the highest score. 
+// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityRenameDetector.java
+func (d *renameDetector) detectContentRenames() error {
+	cnt := max(len(d.added), len(d.deleted))
+	if d.renameLimit > 0 && cnt > d.renameLimit {
+		return nil
+	}
+
+	srcs, dsts := d.deleted, d.added
+	matrix, err := buildSimilarityMatrix(srcs, dsts, d.renameScore)
+	if err != nil {
+		return err
+	}
+	renames := make([]*Change, 0, min(len(matrix), len(dsts)))
+
+	// Match rename pairs on a first come, first serve basis until
+	// we have looked at everything that is above the minimum score.
+	for i := len(matrix) - 1; i >= 0; i-- {
+		pair := matrix[i]
+		src := srcs[pair.deleted]
+		dst := dsts[pair.added]
+
+		if dst == nil || src == nil {
+			// It was already matched before
+			continue
+		}
+
+		renames = append(renames, &Change{From: src.From, To: dst.To})
+
+		// Claim destination and source as matched
+		dsts[pair.added] = nil
+		srcs[pair.deleted] = nil
+	}
+
+	d.modified = append(d.modified, renames...)
+	d.added = compactChanges(dsts)
+	d.deleted = compactChanges(srcs)
+
+	return nil
+}
+
+func (d *renameDetector) detect() (Changes, error) {
+	if len(d.added) > 0 && len(d.deleted) > 0 {
+		d.detectExactRenames()
+
+		if !d.onlyExact {
+			if err := d.detectContentRenames(); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	result := make(Changes, 0, len(d.added)+len(d.deleted)+len(d.modified))
+	result = append(result, d.added...)
+	result = append(result, d.deleted...)
+	result = append(result, d.modified...)
+
+	sort.Stable(result)
+
+	return result, nil
+}
+
+func bestNameMatch(change *Change, changes []*Change) *Change {
+	var best *Change
+	var bestScore int
+
+	cname := changeName(change)
+
+	for _, c := range changes {
+		score := nameSimilarityScore(cname, changeName(c))
+		if score > bestScore {
+			bestScore = score
+			best = c
+		}
+	}
+
+	return best
+}
+
+func nameSimilarityScore(a, b string) int {
+	aDirLen := strings.LastIndexByte(a, '/') + 1
+	bDirLen := strings.LastIndexByte(b, '/') + 1
+
+	dirMin := min(aDirLen, bDirLen)
+	dirMax := max(aDirLen, bDirLen)
+
+	var dirScoreLtr, dirScoreRtl int
+	if dirMax == 0 {
+		dirScoreLtr = 100
+		dirScoreRtl = 100
+	} else {
+		var dirSim int
+
+		for ; dirSim < dirMin; dirSim++ {
+			if a[dirSim] != b[dirSim] {
+				break
+			}
+		}
+
+		dirScoreLtr = dirSim * 100 / dirMax
+
+		if dirScoreLtr == 100 {
+			dirScoreRtl = 100
+		} else {
+			for dirSim = 0; dirSim < dirMin; dirSim++ {
+				if a[aDirLen-1-dirSim] != b[bDirLen-1-dirSim] {
+					break
+				}
+			}
+			dirScoreRtl = dirSim * 100 / dirMax
+		}
+	}
+
+	fileMin := min(len(a)-aDirLen, len(b)-bDirLen)
+	fileMax := max(len(a)-aDirLen, len(b)-bDirLen)
+
+	fileSim := 0
+	for ; fileSim < fileMin; fileSim++ {
+		if a[len(a)-1-fileSim] != b[len(b)-1-fileSim] {
+			break
+		}
+	}
+	fileScore := fileSim * 100 / fileMax
+
+	return (((dirScoreLtr + dirScoreRtl) * 25) + (fileScore * 50)) / 100
+}
+
+func changeName(c *Change) string {
+	if c.To != empty {
+		return c.To.Name
+	}
+	return c.From.Name
+}
+
+func changeHash(c *Change) plumbing.Hash {
+	if c.To != empty {
+		return c.To.TreeEntry.Hash
+	}
+
+	return c.From.TreeEntry.Hash
+}
+
+func changeMode(c *Change) filemode.FileMode {
+	if c.To != empty {
+		return c.To.TreeEntry.Mode
+	}
+
+	return c.From.TreeEntry.Mode
+}
+
+func sameMode(a, b *Change) bool {
+	return changeMode(a) == changeMode(b)
+}
+
+func groupChangesByHash(changes []*Change) map[plumbing.Hash][]*Change {
+	var result = make(map[plumbing.Hash][]*Change)
+	for _, c := range changes {
+		hash := changeHash(c)
+		result[hash] = append(result[hash], c)
+	}
+	return result
+}
+
+type similarityMatrix []similarityPair
+
+func (m similarityMatrix) Len() int      { return len(m) }
+func (m similarityMatrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+func (m similarityMatrix) Less(i, j int) bool {
+	if m[i].score == m[j].score {
+		if m[i].added == m[j].added {
+			return m[i].deleted < m[j].deleted
+		}
+		return m[i].added < m[j].added
+	}
+	return m[i].score < m[j].score
+}
+
+type similarityPair struct {
+	// index of the added file
+	added int
+	// index of the deleted file
+	deleted int
+	// similarity score
+	score int
+}
+
+func max(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
+	// Allocate for the worst-case scenario where every pair has a score
+	// that we need to consider. We might not need that many.
+	matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
+	srcSizes := make([]int64, len(srcs))
+	dstSizes := make([]int64, len(dsts))
+	dstTooLarge := make(map[int]bool)
+
+	// Consider each pair of files, if the score is above the minimum
+	// threshold we need to record that scoring in the matrix so we can
+	// later find the best matches.
+outerLoop:
+	for srcIdx, src := range srcs {
+		if changeMode(src) != filemode.Regular {
+			continue
+		}
+
+		// Declare the from file and the similarity index here to be able to
+		// reuse it inside the inner loop. The reason to not initialize them
+		// here is so we can skip the initialization in case they happen to
+		// not be needed later. They will be initialized inside the inner
+		// loop if and only if they're needed and reused in subsequent passes.
+		var from *File
+		var s *similarityIndex
+		var err error
+		for dstIdx, dst := range dsts {
+			if changeMode(dst) != filemode.Regular {
+				continue
+			}
+
+			if dstTooLarge[dstIdx] {
+				continue
+			}
+
+			var to *File
+			srcSize := srcSizes[srcIdx]
+			if srcSize == 0 {
+				from, _, err = src.Files()
+				if err != nil {
+					return nil, err
+				}
+				srcSize = from.Size + 1
+				srcSizes[srcIdx] = srcSize
+			}
+
+			dstSize := dstSizes[dstIdx]
+			if dstSize == 0 {
+				_, to, err = dst.Files()
+				if err != nil {
+					return nil, err
+				}
+				dstSize = to.Size + 1
+				dstSizes[dstIdx] = dstSize
+			}
+
+			min, max := srcSize, dstSize
+			if dstSize < srcSize {
+				min = dstSize
+				max = srcSize
+			}
+
+			if int(min*100/max) < renameScore {
+				// File sizes are too different to be a match
+				continue
+			}
+
+			if s == nil {
+				s, err = fileSimilarityIndex(from)
+				if err != nil {
+					if err == errIndexFull {
+						continue outerLoop
+					}
+					return nil, err
+				}
+			}
+
+			if to == nil {
+				_, to, err = dst.Files()
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			di, err := fileSimilarityIndex(to)
+			if err != nil {
+				if err == errIndexFull {
+					dstTooLarge[dstIdx] = true
+				}
+
+				return nil, err
+			}
+
+			contentScore := s.score(di, 10000)
+			// The name score returns a value between 0 and 100, so we need to
+			// convert it to the same range as the content score.
+ nameScore := nameSimilarityScore(src.From.Name, dst.To.Name) * 100 + score := (contentScore*99 + nameScore*1) / 10000 + + if score < renameScore { + continue + } + + matrix = append(matrix, similarityPair{added: dstIdx, deleted: srcIdx, score: score}) + } + } + + sort.Stable(matrix) + + return matrix, nil +} + +func compactChanges(changes []*Change) []*Change { + var result []*Change + for _, c := range changes { + if c != nil { + result = append(result, c) + } + } + return result +} + +const ( + keyShift = 32 + maxCountValue = (1 << keyShift) - 1 +) + +var errIndexFull = errors.New("index is full") + +// similarityIndex is an index structure of lines/blocks in one file. +// This structure can be used to compute an approximation of the similarity +// between two files. +// To save space in memory, this index uses a space efficient encoding which +// will not exceed 1MiB per instance. The index starts out at a smaller size +// (closer to 2KiB), but may grow as more distinct blocks within the scanned +// file are discovered. +// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java +type similarityIndex struct { + hashed uint64 + // number of non-zero entries in hashes + numHashes int + growAt int + hashes []keyCountPair + hashBits int +} + +func fileSimilarityIndex(f *File) (*similarityIndex, error) { + idx := newSimilarityIndex() + if err := idx.hash(f); err != nil { + return nil, err + } + + sort.Stable(keyCountPairs(idx.hashes)) + + return idx, nil +} + +func newSimilarityIndex() *similarityIndex { + return &similarityIndex{ + hashBits: 8, + hashes: make([]keyCountPair, 1<<8), + growAt: shouldGrowAt(8), + } +} + +func (i *similarityIndex) hash(f *File) error { + isBin, err := f.IsBinary() + if err != nil { + return err + } + + r, err := f.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + return i.hashContent(r, f.Size, isBin) +} + +func (i *similarityIndex) hashContent(r io.Reader, size int64, isBin bool) error { + var buf = make([]byte, 4096) + var ptr, cnt int + remaining := size + + for 0 < remaining { + hash := 5381 + var blockHashedCnt uint64 + + // Hash one line or block, whatever happens first + n := int64(0) + for { + if ptr == cnt { + ptr = 0 + var err error + cnt, err = io.ReadFull(r, buf) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if cnt == 0 { + return io.EOF + } + } + n++ + c := buf[ptr] & 0xff + ptr++ + + // Ignore CR in CRLF sequence if it's text + if !isBin && c == '\r' && ptr < cnt && buf[ptr] == '\n' { + continue + } + blockHashedCnt++ + + if c == '\n' { + break + } + + hash = (hash << 5) + hash + int(c) + + if n >= 64 || n >= remaining { + break + } + } + i.hashed += blockHashedCnt + if err := i.add(hash, blockHashedCnt); err != nil { + return err + } + remaining -= n + } + + return nil +} + +// score computes the similarity score between this index and another one. +// A region of a file is defined as a line in a text file or a fixed-size +// block in a binary file. To prepare an index, each region in the file is +// hashed; the values and counts of hashes are retained in a sorted table. +// Define the similarity fraction F as the count of matching regions between +// the two files divided between the maximum count of regions in either file. +// The similarity score is F multiplied by the maxScore constant, yielding a +// range [0, maxScore]. It is defined as maxScore for the degenerate case of +// two empty files. 
+// The similarity score is symmetrical; i.e. a.score(b) == b.score(a). +func (i *similarityIndex) score(other *similarityIndex, maxScore int) int { + var maxHashed = i.hashed + if maxHashed < other.hashed { + maxHashed = other.hashed + } + if maxHashed == 0 { + return maxScore + } + + return int(i.common(other) * uint64(maxScore) / maxHashed) +} + +func (i *similarityIndex) common(dst *similarityIndex) uint64 { + srcIdx, dstIdx := 0, 0 + if i.numHashes == 0 || dst.numHashes == 0 { + return 0 + } + + var common uint64 + srcKey, dstKey := i.hashes[srcIdx].key(), dst.hashes[dstIdx].key() + + for { + if srcKey == dstKey { + srcCnt, dstCnt := i.hashes[srcIdx].count(), dst.hashes[dstIdx].count() + if srcCnt < dstCnt { + common += srcCnt + } else { + common += dstCnt + } + + srcIdx++ + if srcIdx == len(i.hashes) { + break + } + srcKey = i.hashes[srcIdx].key() + + dstIdx++ + if dstIdx == len(dst.hashes) { + break + } + dstKey = dst.hashes[dstIdx].key() + } else if srcKey < dstKey { + // Region of src that is not in dst + srcIdx++ + if srcIdx == len(i.hashes) { + break + } + srcKey = i.hashes[srcIdx].key() + } else { + // Region of dst that is not in src + dstIdx++ + if dstIdx == len(dst.hashes) { + break + } + dstKey = dst.hashes[dstIdx].key() + } + } + + return common +} + +func (i *similarityIndex) add(key int, cnt uint64) error { + key = int(uint32(key) * 0x9e370001 >> 1) + + j := i.slot(key) + for { + v := i.hashes[j] + if v == 0 { + // It's an empty slot, so we can store it here. + if i.growAt <= i.numHashes { + if err := i.grow(); err != nil { + return err + } + j = i.slot(key) + continue + } + + var err error + i.hashes[j], err = newKeyCountPair(key, cnt) + if err != nil { + return err + } + i.numHashes++ + return nil + } else if v.key() == key { + // It's the same key, so increment the counter. + var err error + i.hashes[j], err = newKeyCountPair(key, v.count()+cnt) + if err != nil { + return err + } + return nil + } else if j+1 >= len(i.hashes) { + j = 0 + } else { + j++ + } + } +} + +type keyCountPair uint64 + +func newKeyCountPair(key int, cnt uint64) (keyCountPair, error) { + if cnt > maxCountValue { + return 0, errIndexFull + } + + return keyCountPair((uint64(key) << keyShift) | cnt), nil +} + +func (p keyCountPair) key() int { + return int(p >> keyShift) +} + +func (p keyCountPair) count() uint64 { + return uint64(p) & maxCountValue +} + +func (i *similarityIndex) slot(key int) int { + // We use 31 - hashBits because the upper bit was already forced + // to be 0 and we want the remaining high bits to be used as the + // table slot. + return int(uint32(key) >> uint(31-i.hashBits)) +} + +func shouldGrowAt(hashBits int) int { + return (1 << uint(hashBits)) * (hashBits - 3) / hashBits +} + +func (i *similarityIndex) grow() error { + if i.hashBits == 30 { + return errIndexFull + } + + old := i.hashes + + i.hashBits++ + i.growAt = shouldGrowAt(i.hashBits) + + // TODO(erizocosmico): find a way to check if it will OOM and return + // errIndexFull instead. 
+	i.hashes = make([]keyCountPair, 1<<uint(i.hashBits))
+
+	// Reinsert the old entries into the grown table; every stored pair is
+	// non-zero, so empty slots can be skipped.
+	for _, v := range old {
+		if v != 0 {
+			j := i.slot(v.key())
+			for i.hashes[j] != 0 {
+				j++
+				if j >= len(i.hashes) {
+					j = 0
+				}
+			}
+			i.hashes[j] = v
+		}
+	}
+
+	return nil
+}
+
+type keyCountPairs []keyCountPair
+
+func (p keyCountPairs) Len() int           { return len(p) }
+func (p keyCountPairs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p keyCountPairs) Less(i, j int) bool { return p[i] < p[j] }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
new file mode 100644
index 0000000000..4641658045
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
@@ -0,0 +1,357 @@
+package object
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	stdioutil "io/ioutil"
+	"strings"
+
+	"golang.org/x/crypto/openpgp"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// Tag represents an annotated tag object. It points to a single git object of
+// any type, but tags typically are applied to commit or blob objects. It
+// provides a reference that associates the target with a tag name. It also
+// contains meta-information about the tag, including the tagger, tag date and
+// message.
+//
+// Note that this is not used for lightweight tags.
+//
+// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
+type Tag struct {
+	// Hash of the tag.
+	Hash plumbing.Hash
+	// Name of the tag.
+	Name string
+	// Tagger is the one who created the tag.
+	Tagger Signature
+	// Message is an arbitrary text message.
+	Message string
+	// PGPSignature is the PGP signature of the tag.
+	PGPSignature string
+	// TargetType is the object type of the target.
+	TargetType plumbing.ObjectType
+	// Target is the hash of the target object.
+	Target plumbing.Hash
+
+	s storer.EncodedObjectStorer
+}
+
+// GetTag gets a tag from an object storer and decodes it.
+func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
+	o, err := s.EncodedObject(plumbing.TagObject, h)
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeTag(s, o)
+}
+
+// DecodeTag decodes an encoded object into a *Tag and associates it to the
+// given object storer.
+func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
+	t := &Tag{s: s}
+	if err := t.Decode(o); err != nil {
+		return nil, err
+	}
+
+	return t, nil
+}
+
+// ID returns the object ID of the tag, not the object that the tag references.
+// The returned value will always match the current value of Tag.Hash.
+//
+// ID is present to fulfill the Object interface.
+func (t *Tag) ID() plumbing.Hash {
+	return t.Hash
+}
+
+// Type returns the type of object. It always returns plumbing.TagObject.
+//
+// Type is present to fulfill the Object interface.
+func (t *Tag) Type() plumbing.ObjectType {
+	return plumbing.TagObject
+}
+
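For illustration only (not part of the vendored file): the keyCountPair packing above stores the hash key in the upper 32 bits and the occurrence count in the lower 32 bits of a single uint64, which this tiny standalone program demonstrates.

package main

import "fmt"

func main() {
	const keyShift = 32
	key, cnt := uint64(0x1234abcd), uint64(7)
	pair := key<<keyShift | cnt // key in the high 32 bits, count in the low 32

	fmt.Printf("key=%#x count=%d\n", pair>>keyShift, pair&(1<<keyShift-1))
	// prints: key=0x1234abcd count=7
}

+// Decode transforms a plumbing.EncodedObject into a Tag struct.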
+func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
+	if o.Type() != plumbing.TagObject {
+		return ErrUnsupportedObject
+	}
+
+	t.Hash = o.Hash()
+
+	reader, err := o.Reader()
+	if err != nil {
+		return err
+	}
+	defer ioutil.CheckClose(reader, &err)
+
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
+	for {
+		var line []byte
+		line, err = r.ReadBytes('\n')
+		if err != nil && err != io.EOF {
+			return err
+		}
+
+		line = bytes.TrimSpace(line)
+		if len(line) == 0 {
+			break // Start of message
+		}
+
+		split := bytes.SplitN(line, []byte{' '}, 2)
+		switch string(split[0]) {
+		case "object":
+			t.Target = plumbing.NewHash(string(split[1]))
+		case "type":
+			t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
+			if err != nil {
+				return err
+			}
+		case "tag":
+			t.Name = string(split[1])
+		case "tagger":
+			t.Tagger.Decode(split[1])
+		}
+
+		if err == io.EOF {
+			return nil
+		}
+	}
+
+	data, err := stdioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	var pgpsig bool
+	// Check if data contains PGP signature.
+	if bytes.Contains(data, []byte(beginpgp)) {
+		// Split the lines at newline.
+		messageAndSig := bytes.Split(data, []byte("\n"))
+
+		for _, l := range messageAndSig {
+			if pgpsig {
+				if bytes.Contains(l, []byte(endpgp)) {
+					t.PGPSignature += endpgp + "\n"
+					break
+				} else {
+					t.PGPSignature += string(l) + "\n"
+				}
+				continue
+			}
+
+			// Check if it's the beginning of a PGP signature.
+			if bytes.Contains(l, []byte(beginpgp)) {
+				t.PGPSignature += beginpgp + "\n"
+				pgpsig = true
+				continue
+			}
+
+			t.Message += string(l) + "\n"
+		}
+	} else {
+		t.Message = string(data)
+	}
+
+	return nil
+}
+
+// Encode transforms a Tag into a plumbing.EncodedObject.
+func (t *Tag) Encode(o plumbing.EncodedObject) error {
+	return t.encode(o, true)
+}
+
+// EncodeWithoutSignature exports a Tag into a plumbing.EncodedObject without
+// the signature (corresponding to the payload of the PGP signature).
+func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+	return t.encode(o, false)
+}
+
+func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
+	o.SetType(plumbing.TagObject)
+	w, err := o.Writer()
+	if err != nil {
+		return err
+	}
+	defer ioutil.CheckClose(w, &err)
+
+	if _, err = fmt.Fprintf(w,
+		"object %s\ntype %s\ntag %s\ntagger ",
+		t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil {
+		return err
+	}
+
+	if err = t.Tagger.Encode(w); err != nil {
+		return err
+	}
+
+	if _, err = fmt.Fprint(w, "\n\n"); err != nil {
+		return err
+	}
+
+	if _, err = fmt.Fprint(w, t.Message); err != nil {
+		return err
+	}
+
+	// Note that this is highly sensitive to what is sent along in the message.
+	// Message *always* needs to end with a newline, or else the message and the
+	// signature will be concatenated into a corrupt object. Since this is a
+	// lower-level method, we assume you know what you are doing and have
+	// already prepared the message accordingly in the caller.
+	if includeSig {
+		if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
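For illustration only (not part of the vendored file): the serialized layout that Decode and encode above handle, shown as a literal. Header lines come first, a blank line separates them from the message, and an optional PGP signature trails the message. The object id is a placeholder.

package main

import "fmt"

func main() {
	raw := "object 9fceb02d0ae598e95dc970b74767f19372d61af8\n" + // placeholder hash
		"type commit\n" +
		"tag v1.0.0\n" +
		"tagger Jane Doe <jane@example.com> 1257894000 +0100\n" +
		"\n" +
		"Release v1.0.0\n"
	fmt.Print(raw)
}

+// Commit returns the commit pointed to by the tag. If the tag points to a
+// different type of object ErrUnsupportedObject will be returned.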
+func (t *Tag) Commit() (*Commit, error) { + if t.TargetType != plumbing.CommitObject { + return nil, ErrUnsupportedObject + } + + o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target) + if err != nil { + return nil, err + } + + return DecodeCommit(t.s, o) +} + +// Tree returns the tree pointed to by the tag. If the tag points to a commit +// object the tree of that commit will be returned. If the tag does not point +// to a commit or tree object ErrUnsupportedObject will be returned. +func (t *Tag) Tree() (*Tree, error) { + switch t.TargetType { + case plumbing.CommitObject: + c, err := t.Commit() + if err != nil { + return nil, err + } + + return c.Tree() + case plumbing.TreeObject: + return GetTree(t.s, t.Target) + default: + return nil, ErrUnsupportedObject + } +} + +// Blob returns the blob pointed to by the tag. If the tag points to a +// different type of object ErrUnsupportedObject will be returned. +func (t *Tag) Blob() (*Blob, error) { + if t.TargetType != plumbing.BlobObject { + return nil, ErrUnsupportedObject + } + + return GetBlob(t.s, t.Target) +} + +// Object returns the object pointed to by the tag. +func (t *Tag) Object() (Object, error) { + o, err := t.s.EncodedObject(t.TargetType, t.Target) + if err != nil { + return nil, err + } + + return DecodeObject(t.s, o) +} + +// String returns the meta information contained in the tag as a formatted +// string. +func (t *Tag) String() string { + obj, _ := t.Object() + + return fmt.Sprintf( + "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s", + plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat), + t.Message, objectAsString(obj), + ) +} + +// Verify performs PGP verification of the tag with a provided armored +// keyring and returns openpgp.Entity associated with verifying key on success. +func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) { + keyRingReader := strings.NewReader(armoredKeyRing) + keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) + if err != nil { + return nil, err + } + + // Extract signature. + signature := strings.NewReader(t.PGPSignature) + + encoded := &plumbing.MemoryObject{} + // Encode tag components, excluding signature and get a reader object. + if err := t.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + er, err := encoded.Reader() + if err != nil { + return nil, err + } + + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature) +} + +// TagIter provides an iterator for a set of tags. +type TagIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewTagIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *TagIter that iterates over all +// tags contained in the storer.EncodedObjectIter. +// +// Any non-tag object returned by the storer.EncodedObjectIter is skipped. +func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter { + return &TagIter{iter, s} +} + +// Next moves the iterator to the next tag and returns a pointer to it. If +// there are no more tags, it returns io.EOF. +func (iter *TagIter) Next() (*Tag, error) { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + return DecodeTag(iter.s, obj) +} + +// ForEach call the cb function for each tag contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. 
+func (iter *TagIter) ForEach(cb func(*Tag) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + t, err := DecodeTag(iter.s, obj) + if err != nil { + return err + } + + return cb(t) + }) +} + +func objectAsString(obj Object) string { + switch o := obj.(type) { + case *Commit: + return o.String() + default: + return "" + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go new file mode 100644 index 0000000000..5e6378ca49 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go @@ -0,0 +1,525 @@ +package object + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "path" + "path/filepath" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +const ( + maxTreeDepth = 1024 + startingStackSize = 8 +) + +// New errors defined by this package. +var ( + ErrMaxTreeDepth = errors.New("maximum tree depth exceeded") + ErrFileNotFound = errors.New("file not found") + ErrDirectoryNotFound = errors.New("directory not found") + ErrEntryNotFound = errors.New("entry not found") +) + +// Tree is basically like a directory - it references a bunch of other trees +// and/or blobs (i.e. files and sub-directories) +type Tree struct { + Entries []TreeEntry + Hash plumbing.Hash + + s storer.EncodedObjectStorer + m map[string]*TreeEntry + t map[string]*Tree // tree path cache +} + +// GetTree gets a tree from an object storer and decodes it. +func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) { + o, err := s.EncodedObject(plumbing.TreeObject, h) + if err != nil { + return nil, err + } + + return DecodeTree(s, o) +} + +// DecodeTree decodes an encoded object into a *Tree and associates it to the +// given object storer. +func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) { + t := &Tree{s: s} + if err := t.Decode(o); err != nil { + return nil, err + } + + return t, nil +} + +// TreeEntry represents a file +type TreeEntry struct { + Name string + Mode filemode.FileMode + Hash plumbing.Hash +} + +// File returns the hash of the file identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. +func (t *Tree) File(path string) (*File, error) { + e, err := t.FindEntry(path) + if err != nil { + return nil, ErrFileNotFound + } + + blob, err := GetBlob(t.s, e.Hash) + if err != nil { + if err == plumbing.ErrObjectNotFound { + return nil, ErrFileNotFound + } + return nil, err + } + + return NewFile(path, e.Mode, blob), nil +} + +// Size returns the plaintext size of an object, without reading it +// into memory. +func (t *Tree) Size(path string) (int64, error) { + e, err := t.FindEntry(path) + if err != nil { + return 0, ErrEntryNotFound + } + + return t.s.EncodedObjectSize(e.Hash) +} + +// Tree returns the tree identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. +func (t *Tree) Tree(path string) (*Tree, error) { + e, err := t.FindEntry(path) + if err != nil { + return nil, ErrDirectoryNotFound + } + + tree, err := GetTree(t.s, e.Hash) + if err == plumbing.ErrObjectNotFound { + return nil, ErrDirectoryNotFound + } + + return tree, err +} + +// TreeEntryFile returns the *File for a given *TreeEntry. 
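For illustration only (not part of the vendored file): resolving a path inside a Tree with the File method defined above and reading it via File.Contents (defined elsewhere in this package); the path is hypothetical.

package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// showFile resolves a path relative to a tree and prints the file body;
// File returns object.ErrFileNotFound for missing paths.
func showFile(tree *object.Tree) error {
	f, err := tree.File("docs/README.md") // hypothetical path
	if err != nil {
		return err
	}

	body, err := f.Contents()
	if err != nil {
		return err
	}

	fmt.Println(body)
	return nil
}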
+func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) { + blob, err := GetBlob(t.s, e.Hash) + if err != nil { + return nil, err + } + + return NewFile(e.Name, e.Mode, blob), nil +} + +// FindEntry search a TreeEntry in this tree or any subtree. +func (t *Tree) FindEntry(path string) (*TreeEntry, error) { + if t.t == nil { + t.t = make(map[string]*Tree) + } + + pathParts := strings.Split(path, "/") + startingTree := t + pathCurrent := "" + + // search for the longest path in the tree path cache + for i := len(pathParts) - 1; i > 1; i-- { + path := filepath.Join(pathParts[:i]...) + + tree, ok := t.t[path] + if ok { + startingTree = tree + pathParts = pathParts[i:] + pathCurrent = path + + break + } + } + + var tree *Tree + var err error + for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] { + if tree, err = tree.dir(pathParts[0]); err != nil { + return nil, err + } + + pathCurrent = filepath.Join(pathCurrent, pathParts[0]) + t.t[pathCurrent] = tree + } + + return tree.entry(pathParts[0]) +} + +func (t *Tree) dir(baseName string) (*Tree, error) { + entry, err := t.entry(baseName) + if err != nil { + return nil, ErrDirectoryNotFound + } + + obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash) + if err != nil { + return nil, err + } + + tree := &Tree{s: t.s} + err = tree.Decode(obj) + + return tree, err +} + +func (t *Tree) entry(baseName string) (*TreeEntry, error) { + if t.m == nil { + t.buildMap() + } + + entry, ok := t.m[baseName] + if !ok { + return nil, ErrEntryNotFound + } + + return entry, nil +} + +// Files returns a FileIter allowing to iterate over the Tree +func (t *Tree) Files() *FileIter { + return NewFileIter(t.s, t) +} + +// ID returns the object ID of the tree. The returned value will always match +// the current value of Tree.Hash. +// +// ID is present to fulfill the Object interface. +func (t *Tree) ID() plumbing.Hash { + return t.Hash +} + +// Type returns the type of object. It always returns plumbing.TreeObject. +func (t *Tree) Type() plumbing.ObjectType { + return plumbing.TreeObject +} + +// Decode transform an plumbing.EncodedObject into a Tree struct +func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { + if o.Type() != plumbing.TreeObject { + return ErrUnsupportedObject + } + + t.Hash = o.Hash() + if o.Size() == 0 { + return nil + } + + t.Entries = nil + t.m = nil + + reader, err := o.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(reader, &err) + + r := bufPool.Get().(*bufio.Reader) + defer bufPool.Put(r) + r.Reset(reader) + for { + str, err := r.ReadString(' ') + if err != nil { + if err == io.EOF { + break + } + + return err + } + str = str[:len(str)-1] // strip last byte (' ') + + mode, err := filemode.New(str) + if err != nil { + return err + } + + name, err := r.ReadString(0) + if err != nil && err != io.EOF { + return err + } + + var hash plumbing.Hash + if _, err = io.ReadFull(r, hash[:]); err != nil { + return err + } + + baseName := name[:len(name)-1] + t.Entries = append(t.Entries, TreeEntry{ + Hash: hash, + Mode: mode, + Name: baseName, + }) + } + + return nil +} + +// Encode transforms a Tree into a plumbing.EncodedObject. 
+func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { + o.SetType(plumbing.TreeObject) + w, err := o.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + for _, entry := range t.Entries { + if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { + return err + } + + if _, err = w.Write([]byte{0x00}); err != nil { + return err + } + + if _, err = w.Write(entry.Hash[:]); err != nil { + return err + } + } + + return err +} + +func (t *Tree) buildMap() { + t.m = make(map[string]*TreeEntry) + for i := 0; i < len(t.Entries); i++ { + t.m[t.Entries[i].Name] = &t.Entries[i] + } +} + +// Diff returns a list of changes between this tree and the provided one +func (t *Tree) Diff(to *Tree) (Changes, error) { + return t.DiffContext(context.Background(), to) +} + +// DiffContext returns a list of changes between this tree and the provided one +// Error will be returned if context expires. Provided context must be non nil. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (t *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) { + return DiffTreeWithOptions(ctx, t, to, DefaultDiffTreeOptions) +} + +// Patch returns a slice of Patch objects with all the changes between trees +// in chunks. This representation can be used to create several diff outputs. +func (t *Tree) Patch(to *Tree) (*Patch, error) { + return t.PatchContext(context.Background(), to) +} + +// PatchContext returns a slice of Patch objects with all the changes between +// trees in chunks. This representation can be used to create several diff +// outputs. If context expires, an error will be returned. Provided context must +// be non-nil. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (t *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) { + changes, err := t.DiffContext(ctx, to) + if err != nil { + return nil, err + } + + return changes.PatchContext(ctx) +} + +// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree. +type treeEntryIter struct { + t *Tree + pos int +} + +func (iter *treeEntryIter) Next() (TreeEntry, error) { + if iter.pos >= len(iter.t.Entries) { + return TreeEntry{}, io.EOF + } + iter.pos++ + return iter.t.Entries[iter.pos-1], nil +} + +// TreeWalker provides a means of walking through all of the entries in a Tree. +type TreeWalker struct { + stack []*treeEntryIter + base string + recursive bool + seen map[plumbing.Hash]bool + + s storer.EncodedObjectStorer + t *Tree +} + +// NewTreeWalker returns a new TreeWalker for the given tree. +// +// It is the caller's responsibility to call Close() when finished with the +// tree walker. +func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker { + stack := make([]*treeEntryIter, 0, startingStackSize) + stack = append(stack, &treeEntryIter{t, 0}) + + return &TreeWalker{ + stack: stack, + recursive: recursive, + seen: seen, + + s: t.s, + t: t, + } +} + +// Next returns the next object from the tree. Objects are returned in order +// and subtrees are included. After the last object has been returned further +// calls to Next() will return io.EOF. +// +// In the current implementation any objects which cannot be found in the +// underlying repository will be skipped automatically. It is possible that this +// may change in future versions. 
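For illustration only (not part of the vendored file): a typical recursive walk with the TreeWalker defined above, assuming an *object.Tree obtained elsewhere.

package example

import (
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// walk prints every entry reachable from tree, descending into subtrees.
func walk(tree *object.Tree) error {
	w := object.NewTreeWalker(tree, true, nil) // recursive, no seen set
	defer w.Close()

	for {
		name, entry, err := w.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		fmt.Println(name, entry.Mode, entry.Hash)
	}
}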
+func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) { + var obj *Tree + for { + current := len(w.stack) - 1 + if current < 0 { + // Nothing left on the stack so we're finished + err = io.EOF + return + } + + if current > maxTreeDepth { + // We're probably following bad data or some self-referencing tree + err = ErrMaxTreeDepth + return + } + + entry, err = w.stack[current].Next() + if err == io.EOF { + // Finished with the current tree, move back up to the parent + w.stack = w.stack[:current] + w.base, _ = path.Split(w.base) + w.base = strings.TrimSuffix(w.base, "/") + continue + } + + if err != nil { + return + } + + if w.seen[entry.Hash] { + continue + } + + if entry.Mode == filemode.Dir { + obj, err = GetTree(w.s, entry.Hash) + } + + name = simpleJoin(w.base, entry.Name) + + if err != nil { + err = io.EOF + return + } + + break + } + + if !w.recursive { + return + } + + if obj != nil { + w.stack = append(w.stack, &treeEntryIter{obj, 0}) + w.base = simpleJoin(w.base, entry.Name) + } + + return +} + +// Tree returns the tree that the tree walker most recently operated on. +func (w *TreeWalker) Tree() *Tree { + current := len(w.stack) - 1 + if w.stack[current].pos == 0 { + current-- + } + + if current < 0 { + return nil + } + + return w.stack[current].t +} + +// Close releases any resources used by the TreeWalker. +func (w *TreeWalker) Close() { + w.stack = nil +} + +// TreeIter provides an iterator for a set of trees. +type TreeIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewTreeIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *TreeIter that iterates over all +// tree contained in the storer.EncodedObjectIter. +// +// Any non-tree object returned by the storer.EncodedObjectIter is skipped. +func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter { + return &TreeIter{iter, s} +} + +// Next moves the iterator to the next tree and returns a pointer to it. If +// there are no more trees, it returns io.EOF. +func (iter *TreeIter) Next() (*Tree, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + if obj.Type() != plumbing.TreeObject { + continue + } + + return DecodeTree(iter.s, obj) + } +} + +// ForEach call the cb function for each tree contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *TreeIter) ForEach(cb func(*Tree) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + if obj.Type() != plumbing.TreeObject { + return nil + } + + t, err := DecodeTree(iter.s, obj) + if err != nil { + return err + } + + return cb(t) + }) +} + +func simpleJoin(parent, child string) string { + if len(parent) > 0 { + return parent + "/" + child + } + return child +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go new file mode 100644 index 0000000000..b4891b957c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go @@ -0,0 +1,136 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// A treenoder is a helper type that wraps git trees into merkletrie +// noders. 
+//
+// As a merkletrie noder doesn't understand the concept of modes (e.g.
+// file permissions), the treenoder includes the mode of the git tree in
+// the hash, so changes in the modes will be detected as modifications
+// to the file contents by the merkletrie difftree algorithm. This is
+// consistent with how the "git diff-tree" command works.
+type treeNoder struct {
+	parent   *Tree  // the root node is its own parent
+	name     string // empty string for the root node
+	mode     filemode.FileMode
+	hash     plumbing.Hash
+	children []noder.Noder // memoized
+}
+
+// NewTreeRootNode returns the root node of a Tree
+func NewTreeRootNode(t *Tree) noder.Noder {
+	if t == nil {
+		return &treeNoder{}
+	}
+
+	return &treeNoder{
+		parent: t,
+		name:   "",
+		mode:   filemode.Dir,
+		hash:   t.Hash,
+	}
+}
+
+func (t *treeNoder) isRoot() bool {
+	return t.name == ""
+}
+
+func (t *treeNoder) String() string {
+	return "treeNoder <" + t.name + ">"
+}
+
+func (t *treeNoder) Hash() []byte {
+	if t.mode == filemode.Deprecated {
+		return append(t.hash[:], filemode.Regular.Bytes()...)
+	}
+	return append(t.hash[:], t.mode.Bytes()...)
+}
+
+func (t *treeNoder) Name() string {
+	return t.name
+}
+
+func (t *treeNoder) IsDir() bool {
+	return t.mode == filemode.Dir
+}
+
+// Children will return the children of a treenoder as treenoders,
+// building them from the children of the wrapped git tree.
+func (t *treeNoder) Children() ([]noder.Noder, error) {
+	if t.mode != filemode.Dir {
+		return noder.NoChildren, nil
+	}
+
+	// children are memoized for efficiency
+	if t.children != nil {
+		return t.children, nil
+	}
+
+	// The parent of the returned children will be ourself as a tree if
+	// we are not the root treenoder. The root is special as it is its
+	// own parent.
+	parent := t.parent
+	if !t.isRoot() {
+		var err error
+		if parent, err = t.parent.Tree(t.name); err != nil {
+			return nil, err
+		}
+	}
+
+	return transformChildren(parent)
+}
+
+// Returns the children of a tree as treenoders.
+// Efficiency is key here.
+func transformChildren(t *Tree) ([]noder.Noder, error) {
+	var err error
+	var e TreeEntry
+
+	// there will be more tree entries than children in the tree,
+	// due to submodules and empty directories, but it is still worth
+	// pre-allocating the whole array now, even if it is sometimes
+	// bigger than needed.
+	ret := make([]noder.Noder, 0, len(t.Entries))
+
+	walker := NewTreeWalker(t, false, nil) // don't recurse
+	// don't defer walker.Close() for efficiency reasons.
+	for {
+		_, e, err = walker.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			walker.Close()
+			return nil, err
+		}
+
+		ret = append(ret, &treeNoder{
+			parent: t,
+			name:   e.Name,
+			mode:   e.Mode,
+			hash:   e.Hash,
+		})
+	}
+	walker.Close()
+
+	return ret, nil
+}
+
+// len(t.tree.Entries) != the number of elements walked by the treewalker,
+// because of empty directories, submodules, etc., so we have to walk here.
+func (t *treeNoder) NumChildren() (int, error) {
+	children, err := t.Children()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(children), nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go
new file mode 100644
index 0000000000..1bd724cad5
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go
@@ -0,0 +1,211 @@
+package packp
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/storage/memory"
+)
+
+// AdvRefs values represent the information transmitted on an
+// advertised-refs message. Values from this type are not zero-value
+// safe, use the New function instead.
+type AdvRefs struct {
+	// Prefix stores prefix payloads.
+	//
+	// When using this message over (smart) HTTP, you have to add a pktline
+	// before the whole thing with the following payload:
+	//
+	// '# service=$servicename' LF
+	//
+	// Moreover, some (if not all) git HTTP smart servers will send a
+	// flush-pkt just after the first pkt-line.
+	//
+	// To accommodate both situations, the Prefix field allows you to store
+	// any data you want to send before the actual pktlines. It will also
+	// be filled up with whatever is found on the line.
+	Prefix [][]byte
+	// Head stores the resolved HEAD reference if present.
+	// This can be present with git-upload-pack, not with git-receive-pack.
+	Head *plumbing.Hash
+	// Capabilities are the advertised capabilities.
+	Capabilities *capability.List
+	// References are the hash references.
+	References map[string]plumbing.Hash
+	// Peeled are the peeled hash references.
+	Peeled map[string]plumbing.Hash
+	// Shallows are the shallow object ids.
+	Shallows []plumbing.Hash
+}
+
+// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
+func NewAdvRefs() *AdvRefs {
+	return &AdvRefs{
+		Prefix:       [][]byte{},
+		Capabilities: capability.NewList(),
+		References:   make(map[string]plumbing.Hash),
+		Peeled:       make(map[string]plumbing.Hash),
+		Shallows:     []plumbing.Hash{},
+	}
+}
+
+func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
+	switch r.Type() {
+	case plumbing.SymbolicReference:
+		v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
+		a.Capabilities.Add(capability.SymRef, v)
+	case plumbing.HashReference:
+		a.References[r.Name().String()] = r.Hash()
+	default:
+		return plumbing.ErrInvalidType
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
+	s := memory.ReferenceStorage{}
+	if err := a.addRefs(s); err != nil {
+		return s, plumbing.NewUnexpectedError(err)
+	}
+
+	return s, nil
+}
+
+func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
+	for name, hash := range a.References {
+		ref := plumbing.NewReferenceFromStrings(name, hash.String())
+		if err := s.SetReference(ref); err != nil {
+			return err
+		}
+	}
+
+	if a.supportSymrefs() {
+		return a.addSymbolicRefs(s)
+	}
+
+	return a.resolveHead(s)
+}
+
+// If the server does not support the symref capability, we need to guess
+// the reference that HEAD is pointing to.
+//
+// Git versions prior to 1.8.4.3 have a special procedure to find the
+// reference HEAD points to:
+// - Check if a reference called master exists. If it exists and has the
+//   same hash as HEAD, we can say that HEAD is pointing to master.
+// - If master does not exist or does not have the same hash as HEAD,
+//   sort the references and check, in that order, whether each reference
+//   has the same hash as HEAD. If so, set HEAD to point to that branch.
+// - If no reference is found, return an error.
+func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
+	if a.Head == nil {
+		return nil
+	}
+
+	ref, err := s.Reference(plumbing.Master)
+
+	// check first if HEAD is pointing to master
+	if err == nil {
+		ok, err := a.createHeadIfCorrectReference(ref, s)
+		if err != nil {
+			return err
+		}
+
+		if ok {
+			return nil
+		}
+	}
+
+	if err != nil && err != plumbing.ErrReferenceNotFound {
+		return err
+	}
+
+	// From here on we are trying to guess the branch that HEAD is pointing to
+	refIter, err := s.IterReferences()
+	if err != nil {
+		return err
+	}
+
+	var refNames []string
+	err = refIter.ForEach(func(r *plumbing.Reference) error {
+		refNames = append(refNames, string(r.Name()))
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	sort.Strings(refNames)
+
+	var headSet bool
+	for _, refName := range refNames {
+		ref, err := s.Reference(plumbing.ReferenceName(refName))
+		if err != nil {
+			return err
+		}
+		ok, err := a.createHeadIfCorrectReference(ref, s)
+		if err != nil {
+			return err
+		}
+		if ok {
+			headSet = true
+			break
+		}
+	}
+
+	if !headSet {
+		return plumbing.ErrReferenceNotFound
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) createHeadIfCorrectReference(
+	reference *plumbing.Reference,
+	s storer.ReferenceStorer) (bool, error) {
+	if reference.Hash() == *a.Head {
+		headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
+		if err := s.SetReference(headRef); err != nil {
+			return false, err
+		}
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
+	for _, symref := range a.Capabilities.Get(capability.SymRef) {
+		chunks := strings.Split(symref, ":")
+		if len(chunks) != 2 {
+			err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
+			return plumbing.NewUnexpectedError(err)
+		}
+		name := plumbing.ReferenceName(chunks[0])
+		target := plumbing.ReferenceName(chunks[1])
+		ref := plumbing.NewSymbolicReference(name, target)
+		if err := s.SetReference(ref); err != nil {
+			// do not swallow the storage error
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) supportSymrefs() bool {
+	return a.Capabilities.Supports(capability.SymRef)
+}
+
+// IsEmpty returns true if the AdvRefs doesn't contain any references.
+func (a *AdvRefs) IsEmpty() bool {
+	return a.Head == nil &&
+		len(a.References) == 0 &&
+		len(a.Peeled) == 0 &&
+		len(a.Shallows) == 0
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
new file mode 100644
index 0000000000..63bbe5ab16
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
@@ -0,0 +1,288 @@
+package packp
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+// Decode reads the next advertised-refs message from its input and
+// stores it in the AdvRefs.
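+//
+// A minimal usage sketch (assuming r is an io.Reader carrying a pkt-line
+// encoded reference advertisement; error handling elided):
+//
+//	ar := NewAdvRefs()
+//	if err := ar.Decode(r); err != nil {
+//		// handle error
+//	}
+//	for name, hash := range ar.References {
+//		fmt.Println(name, hash)
+//	}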
+func (a *AdvRefs) Decode(r io.Reader) error { + d := newAdvRefsDecoder(r) + return d.Decode(a) +} + +type advRefsDecoder struct { + s *pktline.Scanner // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + hash plumbing.Hash // last hash read + err error // sticky error, use the parser.error() method to fill this out + data *AdvRefs // parsed data is stored here +} + +var ( + // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised + // references message. + ErrEmptyAdvRefs = errors.New("empty advertised-ref message") + // ErrEmptyInput is returned by Decode if the input is empty. + ErrEmptyInput = errors.New("empty input") +) + +func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { + return &advRefsDecoder{ + s: pktline.NewScanner(r), + } +} + +func (d *advRefsDecoder) Decode(v *AdvRefs) error { + d.data = v + + for state := decodePrefix; state != nil; { + state = state(d) + } + + return d.err +} + +type decoderStateFn func(*advRefsDecoder) decoderStateFn + +// fills out the parser sticky error +func (d *advRefsDecoder) error(format string, a ...interface{}) { + msg := fmt.Sprintf( + "pkt-line %d: %s", d.nLine, + fmt.Sprintf(format, a...), + ) + + d.err = NewErrUnexpectedData(msg, d.line) +} + +// Reads a new pkt-line from the scanner, makes its payload available as +// p.line and increments p.nLine. A successful invocation returns true, +// otherwise, false is returned and the sticky error is filled out +// accordingly. Trims eols at the end of the payloads. +func (d *advRefsDecoder) nextLine() bool { + d.nLine++ + + if !d.s.Scan() { + if d.err = d.s.Err(); d.err != nil { + return false + } + + if d.nLine == 1 { + d.err = ErrEmptyInput + return false + } + + d.error("EOF") + return false + } + + d.line = d.s.Bytes() + d.line = bytes.TrimSuffix(d.line, eol) + + return true +} + +// The HTTP smart prefix is often followed by a flush-pkt. +func decodePrefix(d *advRefsDecoder) decoderStateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if !isPrefix(d.line) { + return decodeFirstHash + } + + tmp := make([]byte, len(d.line)) + copy(tmp, d.line) + d.data.Prefix = append(d.data.Prefix, tmp) + if ok := d.nextLine(); !ok { + return nil + } + + if !isFlush(d.line) { + return decodeFirstHash + } + + d.data.Prefix = append(d.data.Prefix, pktline.Flush) + if ok := d.nextLine(); !ok { + return nil + } + + return decodeFirstHash +} + +func isPrefix(payload []byte) bool { + return len(payload) > 0 && payload[0] == '#' +} + +// If the first hash is zero, then a no-refs is coming. Otherwise, a +// list-of-refs is coming, and the hash will be followed by the first +// advertised ref. +func decodeFirstHash(p *advRefsDecoder) decoderStateFn { + // If the repository is empty, we receive a flush here (HTTP). 
+	if isFlush(p.line) {
+		p.err = ErrEmptyAdvRefs
+		return nil
+	}
+
+	if len(p.line) < hashSize {
+		p.error("cannot read hash, pkt-line too short")
+		return nil
+	}
+
+	if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil {
+		p.error("invalid hash text: %s", err)
+		return nil
+	}
+
+	p.line = p.line[hashSize:]
+
+	if p.hash.IsZero() {
+		return decodeSkipNoRefs
+	}
+
+	return decodeFirstRef
+}
+
+// Skips SP "capabilities^{}" NUL
+func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn {
+	if len(p.line) < len(noHeadMark) {
+		p.error("too short zero-id ref")
+		return nil
+	}
+
+	if !bytes.HasPrefix(p.line, noHeadMark) {
+		p.error("malformed zero-id ref")
+		return nil
+	}
+
+	p.line = p.line[len(noHeadMark):]
+
+	return decodeCaps
+}
+
+// Decodes the refname; expects SP refname NUL
+func decodeFirstRef(l *advRefsDecoder) decoderStateFn {
+	if len(l.line) < 3 {
+		l.error("line too short after hash")
+		return nil
+	}
+
+	if !bytes.HasPrefix(l.line, sp) {
+		l.error("no space after hash")
+		return nil
+	}
+	l.line = l.line[1:]
+
+	chunks := bytes.SplitN(l.line, null, 2)
+	if len(chunks) < 2 {
+		l.error("NUL not found")
+		return nil
+	}
+	ref := chunks[0]
+	l.line = chunks[1]
+
+	if bytes.Equal(ref, []byte(head)) {
+		l.data.Head = &l.hash
+	} else {
+		l.data.References[string(ref)] = l.hash
+	}
+
+	return decodeCaps
+}
+
+func decodeCaps(p *advRefsDecoder) decoderStateFn {
+	if err := p.data.Capabilities.Decode(p.line); err != nil {
+		p.error("invalid capabilities: %s", err)
+		return nil
+	}
+
+	return decodeOtherRefs
+}
+
+// The refs are either tips (obj-id SP refname) or peeled (obj-id SP refname^{}).
+// If there are no refs, then there might be a shallow or flush-pkt.
+func decodeOtherRefs(p *advRefsDecoder) decoderStateFn {
+	if ok := p.nextLine(); !ok {
+		return nil
+	}
+
+	if bytes.HasPrefix(p.line, shallow) {
+		return decodeShallow
+	}
+
+	if len(p.line) == 0 {
+		return nil
+	}
+
+	saveTo := p.data.References
+	if bytes.HasSuffix(p.line, peeled) {
+		p.line = bytes.TrimSuffix(p.line, peeled)
+		saveTo = p.data.Peeled
+	}
+
+	ref, hash, err := readRef(p.line)
+	if err != nil {
+		p.error("%s", err)
+		return nil
+	}
+	saveTo[ref] = hash
+
+	return decodeOtherRefs
+}
+
+// Reads an obj-id SP refname pair
+func readRef(data []byte) (string, plumbing.Hash, error) {
+	chunks := bytes.Split(data, sp)
+	switch {
+	case len(chunks) == 1:
+		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found")
+	case len(chunks) > 2:
+		return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found")
+	default:
+		return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil
+	}
+}
+
+// Keeps reading shallows until a flush-pkt is found
+func decodeShallow(p *advRefsDecoder) decoderStateFn {
+	if !bytes.HasPrefix(p.line, shallow) {
+		p.error("malformed shallow prefix, found %q... instead", p.line[:len(shallow)])
+		return nil
+	}
+	p.line = bytes.TrimPrefix(p.line, shallow)
+
+	if len(p.line) != hashSize {
+		p.error(fmt.Sprintf(
+			"malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
+			len(p.line)))
+		return nil
+	}
+
+	text := p.line[:hashSize]
+	var h plumbing.Hash
+	if _, err := hex.Decode(h[:], text); err != nil {
+		p.error("invalid hash text: %s", err)
+		return nil
+	}
+
+	p.data.Shallows = append(p.data.Shallows, h)
+
+	if ok := p.nextLine(); !ok {
+		return nil
+	}
+
+	if len(p.line) == 0 {
+		return nil // successful parse of the advertised-refs message
+	}
+
+	return decodeShallow
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
new file mode 100644
index 0000000000..fb9bd883fc
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
@@ -0,0 +1,176 @@
+package packp
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sort"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+)
+
+// Encode writes the AdvRefs encoding to a writer.
+//
+// All the payloads will end with a newline character. Capabilities,
+// references and shallows are written in alphabetical order, except for
+// peeled references that always follow their corresponding references.
+func (a *AdvRefs) Encode(w io.Writer) error {
+	e := newAdvRefsEncoder(w)
+	return e.Encode(a)
+}
+
+type advRefsEncoder struct {
+	data         *AdvRefs         // data to encode
+	pe           *pktline.Encoder // where to write the encoded data
+	firstRefName string           // reference name to encode in the first pkt-line (HEAD if present)
+	firstRefHash plumbing.Hash    // hash referenced to encode in the first pkt-line (HEAD if present)
+	sortedRefs   []string         // hash references to encode ordered by increasing order
+	err          error            // sticky error
+}
+
+func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
+	return &advRefsEncoder{
+		pe: pktline.NewEncoder(w),
+	}
+}
+
+func (e *advRefsEncoder) Encode(v *AdvRefs) error {
+	e.data = v
+	e.sortRefs()
+	e.setFirstRef()
+
+	for state := encodePrefix; state != nil; {
+		state = state(e)
+	}
+
+	return e.err
+}
+
+func (e *advRefsEncoder) sortRefs() {
+	if len(e.data.References) > 0 {
+		refs := make([]string, 0, len(e.data.References))
+		for refName := range e.data.References {
+			refs = append(refs, refName)
+		}
+
+		sort.Strings(refs)
+		e.sortedRefs = refs
+	}
+}
+
+func (e *advRefsEncoder) setFirstRef() {
+	if e.data.Head != nil {
+		e.firstRefName = head
+		e.firstRefHash = *e.data.Head
+		return
+	}
+
+	if len(e.sortedRefs) > 0 {
+		refName := e.sortedRefs[0]
+		e.firstRefName = refName
+		e.firstRefHash = e.data.References[refName]
+	}
+}
+
+type encoderStateFn func(*advRefsEncoder) encoderStateFn
+
+func encodePrefix(e *advRefsEncoder) encoderStateFn {
+	for _, p := range e.data.Prefix {
+		if bytes.Equal(p, pktline.Flush) {
+			if e.err = e.pe.Flush(); e.err != nil {
+				return nil
+			}
+			continue
+		}
+		if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
+			return nil
+		}
+	}
+
+	return encodeFirstLine
+}
+
+// Adds the first pkt-line payload: head hash, head ref and capabilities.
+// If the HEAD ref is not found, the first reference sorted in increasing
+// order is used instead.
+// If there is neither a HEAD nor any refs, the first line will be
+// "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
+// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt +// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt +func encodeFirstLine(e *advRefsEncoder) encoderStateFn { + const formatFirstLine = "%s %s\x00%s\n" + var firstLine string + capabilities := formatCaps(e.data.Capabilities) + + if e.firstRefName == "" { + firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities) + } else { + firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities) + + } + + if e.err = e.pe.EncodeString(firstLine); e.err != nil { + return nil + } + + return encodeRefs +} + +func formatCaps(c *capability.List) string { + if c == nil { + return "" + } + + return c.String() +} + +// Adds the (sorted) refs: hash SP refname EOL +// and their peeled refs if any. +func encodeRefs(e *advRefsEncoder) encoderStateFn { + for _, r := range e.sortedRefs { + if r == e.firstRefName { + continue + } + + hash := e.data.References[r] + if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { + return nil + } + + if hash, ok := e.data.Peeled[r]; ok { + if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { + return nil + } + } + } + + return encodeShallow +} + +// Adds the (sorted) shallows: "shallow" SP hash EOL +func encodeShallow(e *advRefsEncoder) encoderStateFn { + sorted := sortShallows(e.data.Shallows) + for _, hash := range sorted { + if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { + return nil + } + } + + return encodeFlush +} + +func sortShallows(c []plumbing.Hash) []string { + ret := []string{} + for _, h := range c { + ret = append(ret, h.String()) + } + sort.Strings(ret) + + return ret +} + +func encodeFlush(e *advRefsEncoder) encoderStateFn { + e.err = e.pe.Flush() + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go new file mode 100644 index 0000000000..a129781157 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go @@ -0,0 +1,252 @@ +// Package capability defines the server and client capabilities. +package capability + +// Capability describes a server or client capability. +type Capability string + +func (n Capability) String() string { + return string(n) +} + +const ( + // MultiACK capability allows the server to return "ACK obj-id continue" as + // soon as it finds a commit that it can use as a common base, between the + // client's wants and the client's have set. + // + // By sending this early, the server can potentially head off the client + // from walking any further down that particular branch of the client's + // repository history. The client may still need to walk down other + // branches, sending have lines for those, until the server has a + // complete cut across the DAG, or the client has said "done". + // + // Without multi_ack, a client sends have lines in --date-order until + // the server has found a common base. That means the client will send + // have lines that are already known by the server to be common, because + // they overlap in time with another branch that the server hasn't found + // a common base on yet. 
+ // + // For example suppose the client has commits in caps that the server + // doesn't and the server has commits in lower case that the client + // doesn't, as in the following diagram: + // + // +---- u ---------------------- x + // / +----- y + // / / + // a -- b -- c -- d -- E -- F + // \ + // +--- Q -- R -- S + // + // If the client wants x,y and starts out by saying have F,S, the server + // doesn't know what F,S is. Eventually the client says "have d" and + // the server sends "ACK d continue" to let the client know to stop + // walking down that line (so don't send c-b-a), but it's not done yet, + // it needs a base for x. The client keeps going with S-R-Q, until a + // gets reached, at which point the server has a clear base and it all + // ends. + // + // Without multi_ack the client would have sent that c-b-a chain anyway, + // interleaved with S-R-Q. + MultiACK Capability = "multi_ack" + // MultiACKDetailed is an extension of multi_ack that permits client to + // better understand the server's in-memory state. + MultiACKDetailed Capability = "multi_ack_detailed" + // NoDone should only be used with the smart HTTP protocol. If + // multi_ack_detailed and no-done are both present, then the sender is + // free to immediately send a pack following its first "ACK obj-id ready" + // message. + // + // Without no-done in the smart HTTP protocol, the server session would + // end and the client has to make another trip to send "done" before + // the server can send the pack. no-done removes the last round and + // thus slightly reduces latency. + NoDone Capability = "no-done" + // ThinPack is one with deltas which reference base objects not + // contained within the pack (but are known to exist at the receiving + // end). This can reduce the network traffic significantly, but it + // requires the receiving end to know how to "thicken" these packs by + // adding the missing bases to the pack. + // + // The upload-pack server advertises 'thin-pack' when it can generate + // and send a thin pack. A client requests the 'thin-pack' capability + // when it understands how to "thicken" it, notifying the server that + // it can receive such a pack. A client MUST NOT request the + // 'thin-pack' capability if it cannot turn a thin pack into a + // self-contained pack. + // + // Receive-pack, on the other hand, is assumed by default to be able to + // handle thin packs, but can ask the client not to use the feature by + // advertising the 'no-thin' capability. A client MUST NOT send a thin + // pack if the server advertises the 'no-thin' capability. + // + // The reasons for this asymmetry are historical. The receive-pack + // program did not exist until after the invention of thin packs, so + // historically the reference implementation of receive-pack always + // understood thin packs. Adding 'no-thin' later allowed receive-pack + // to disable the feature in a backwards-compatible manner. + ThinPack Capability = "thin-pack" + // Sideband means that server can send, and client understand multiplexed + // progress reports and error info interleaved with the packfile itself. + // + // These two options are mutually exclusive. A modern client always + // favors Sideband64k. + // + // Either mode indicates that the packfile data will be streamed broken + // up into packets of up to either 1000 bytes in the case of 'side_band', + // or 65520 bytes in the case of 'side_band_64k'. 
Each packet is made up + // of a leading 4-byte pkt-line length of how much data is in the packet, + // followed by a 1-byte stream code, followed by the actual data. + // + // The stream code can be one of: + // + // 1 - pack data + // 2 - progress messages + // 3 - fatal error message just before stream aborts + // + // The "side-band-64k" capability came about as a way for newer clients + // that can handle much larger packets to request packets that are + // actually crammed nearly full, while maintaining backward compatibility + // for the older clients. + // + // Further, with side-band and its up to 1000-byte messages, it's actually + // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, + // same deal, you have up to 65519 bytes of data and 1 byte for the stream + // code. + // + // The client MUST send only maximum of one of "side-band" and "side- + // band-64k". Server MUST diagnose it as an error if client requests + // both. + Sideband Capability = "side-band" + Sideband64k Capability = "side-band-64k" + // OFSDelta server can send, and client understand PACKv2 with delta + // referring to its base by position in pack rather than by an obj-id. That + // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile. + OFSDelta Capability = "ofs-delta" + // Agent the server may optionally send this capability to notify the client + // that the server is running version `X`. The client may optionally return + // its own agent string by responding with an `agent=Y` capability (but it + // MUST NOT do so if the server did not mention the agent capability). The + // `X` and `Y` strings may contain any printable ASCII characters except + // space (i.e., the byte range 32 < x < 127), and are typically of the form + // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely + // informative for statistics and debugging purposes, and MUST NOT be used + // to programmatically assume the presence or absence of particular features. + Agent Capability = "agent" + // Shallow capability adds "deepen", "shallow" and "unshallow" commands to + // the fetch-pack/upload-pack protocol so clients can request shallow + // clones. + Shallow Capability = "shallow" + // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific time, instead of depth. Internally it's equivalent of doing + // "rev-list --max-age=" on the server side. "deepen-since" + // cannot be used with "deepen". + DeepenSince Capability = "deepen-since" + // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific revision, instead of depth. Internally it's equivalent of + // doing "rev-list --not " on the server side. "deepen-not" + // cannot be used with "deepen", but can be used with "deepen-since". + DeepenNot Capability = "deepen-not" + // DeepenRelative if this capability is requested by the client, the + // semantics of "deepen" command is changed. The "depth" argument is the + // depth from the current shallow boundary, instead of the depth from + // remote refs. + DeepenRelative Capability = "deepen-relative" + // NoProgress the client was started with "git clone -q" or something, and + // doesn't want that side band 2. Basically the client just says "I do not + // wish to receive stream 2 on sideband, so do not send it to me, and if + // you did, I will drop it on the floor anyway". 
However, the sideband + // channel 3 is still used for error responses. + NoProgress Capability = "no-progress" + // IncludeTag capability is about sending annotated tags if we are + // sending objects they point to. If we pack an object to the client, and + // a tag object points exactly at that object, we pack the tag object too. + // In general this allows a client to get all new annotated tags when it + // fetches a branch, in a single network connection. + // + // Clients MAY always send include-tag, hardcoding it into a request when + // the server advertises this capability. The decision for a client to + // request include-tag only has to do with the client's desires for tag + // data, whether or not a server had advertised objects in the + // refs/tags/* namespace. + // + // Servers MUST pack the tags if their referrant is packed and the client + // has requested include-tags. + // + // Clients MUST be prepared for the case where a server has ignored + // include-tag and has not actually sent tags in the pack. In such + // cases the client SHOULD issue a subsequent fetch to acquire the tags + // that include-tag would have otherwise given the client. + // + // The server SHOULD send include-tag, if it supports it, regardless + // of whether or not there are tags available. + IncludeTag Capability = "include-tag" + // ReportStatus the receive-pack process can receive a 'report-status' + // capability, which tells it that the client wants a report of what + // happened after a packfile upload and reference update. If the pushing + // client requests this capability, after unpacking and updating references + // the server will respond with whether the packfile unpacked successfully + // and if each reference was updated successfully. If any of those were not + // successful, it will send back an error message. See pack-protocol.txt + // for example messages. + ReportStatus Capability = "report-status" + // DeleteRefs If the server sends back this capability, it means that + // it is capable of accepting a zero-id value as the target + // value of a reference update. It is not sent back by the client, it + // simply informs the client that it can be sent zero-id values + // to delete references + DeleteRefs Capability = "delete-refs" + // Quiet If the receive-pack server advertises this capability, it is + // capable of silencing human-readable progress output which otherwise may + // be shown when processing the received pack. A send-pack client should + // respond with the 'quiet' capability to suppress server-side progress + // reporting if the local progress reporting is also being suppressed + // (e.g., via `push -q`, or if stderr does not go to a tty). + Quiet Capability = "quiet" + // Atomic If the server sends this capability it is capable of accepting + // atomic pushes. If the pushing client requests this capability, the server + // will update the refs in one atomic transaction. Either all refs are + // updated or none. + Atomic Capability = "atomic" + // PushOptions If the server sends this capability it is able to accept + // push options after the update commands have been sent, but before the + // packfile is streamed. If the pushing client requests this capability, + // the server will pass the options to the pre- and post- receive hooks + // that process this push request. 
+	PushOptions Capability = "push-options"
+	// AllowTipSHA1InWant if the upload-pack server advertises this capability,
+	// fetch-pack may send "want" lines with SHA-1s that exist at the server but
+	// are not advertised by upload-pack.
+	AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
+	// AllowReachableSHA1InWant if the upload-pack server advertises this
+	// capability, fetch-pack may send "want" lines with SHA-1s that exist at
+	// the server but are not advertised by upload-pack.
+	AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
+	// PushCert the receive-pack server that advertises this capability is
+	// willing to accept a signed push certificate, and asks the <nonce> to be
+	// included in the push certificate. A send-pack client MUST NOT
+	// send a push-cert packet unless the receive-pack server advertises
+	// this capability.
+	PushCert Capability = "push-cert"
+	// SymRef symbolic reference support for better negotiation.
+	SymRef Capability = "symref"
+)
+
+const DefaultAgent = "go-git/4.x"
+
+var known = map[Capability]bool{
+	MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
+	Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
+	Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
+	NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
+	Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
+	AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
+}
+
+var requiresArgument = map[Capability]bool{
+	Agent: true, PushCert: true, SymRef: true,
+}
+
+var multipleArgument = map[Capability]bool{
+	SymRef: true,
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
new file mode 100644
index 0000000000..f41ec799cf
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
@@ -0,0 +1,193 @@
+package capability
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	// ErrArgumentsRequired is returned if no arguments are given with a
+	// capability that requires arguments.
+	ErrArgumentsRequired = errors.New("arguments required")
+	// ErrArguments is returned if arguments are given with a capability
+	// that does not support arguments.
+	ErrArguments = errors.New("arguments not allowed")
+	// ErrEmptyArgument is returned when an empty value is given.
+	ErrEmptyArgument = errors.New("empty argument")
+	// ErrMultipleArguments is returned when multiple arguments are given
+	// to a capability that does not support them.
+	ErrMultipleArguments = errors.New("multiple arguments not allowed")
+)
+
+// List represents a list of capabilities
+type List struct {
+	m    map[Capability]*entry
+	sort []string
+}
+
+type entry struct {
+	Name   Capability
+	Values []string
+}
+
+// NewList returns a new List of capabilities
+func NewList() *List {
+	return &List{
+		m: make(map[Capability]*entry),
+	}
+}
+
+// IsEmpty returns true if the List is empty
+func (l *List) IsEmpty() bool {
+	return len(l.sort) == 0
+}
+
+// Decode decodes a list of capabilities from raw into the List
+func (l *List) Decode(raw []byte) error {
+	// git 1.x receive-pack used to send a leading space on its
+	// git-receive-pack capabilities announcement. We just trim space to be
+	// tolerant to space changes in different versions.
+ raw = bytes.TrimSpace(raw) + + if len(raw) == 0 { + return nil + } + + for _, data := range bytes.Split(raw, []byte{' '}) { + pair := bytes.SplitN(data, []byte{'='}, 2) + + c := Capability(pair[0]) + if len(pair) == 1 { + if err := l.Add(c); err != nil { + return err + } + + continue + } + + if err := l.Add(c, string(pair[1])); err != nil { + return err + } + } + + return nil +} + +// Get returns the values for a capability +func (l *List) Get(capability Capability) []string { + if _, ok := l.m[capability]; !ok { + return nil + } + + return l.m[capability].Values +} + +// Set sets a capability removing the previous values +func (l *List) Set(capability Capability, values ...string) error { + delete(l.m, capability) + return l.Add(capability, values...) +} + +// Add adds a capability, values are optional +func (l *List) Add(c Capability, values ...string) error { + if err := l.validate(c, values); err != nil { + return err + } + + if !l.Supports(c) { + l.m[c] = &entry{Name: c} + l.sort = append(l.sort, c.String()) + } + + if len(values) == 0 { + return nil + } + + if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 { + return ErrMultipleArguments + } + + l.m[c].Values = append(l.m[c].Values, values...) + return nil +} + +func (l *List) validateNoEmptyArgs(values []string) error { + for _, v := range values { + if v == "" { + return ErrEmptyArgument + } + } + return nil +} + +func (l *List) validate(c Capability, values []string) error { + if !known[c] { + return l.validateNoEmptyArgs(values) + } + if requiresArgument[c] && len(values) == 0 { + return ErrArgumentsRequired + } + + if !requiresArgument[c] && len(values) != 0 { + return ErrArguments + } + + if !multipleArgument[c] && len(values) > 1 { + return ErrMultipleArguments + } + return l.validateNoEmptyArgs(values) +} + +// Supports returns true if capability is present +func (l *List) Supports(capability Capability) bool { + _, ok := l.m[capability] + return ok +} + +// Delete deletes a capability from the List +func (l *List) Delete(capability Capability) { + if !l.Supports(capability) { + return + } + + delete(l.m, capability) + for i, c := range l.sort { + if c != string(capability) { + continue + } + + l.sort = append(l.sort[:i], l.sort[i+1:]...) + return + } +} + +// All returns a slice with all defined capabilities. 
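+//
+// A small usage sketch of building and querying a List (hedged example):
+//
+//	l := NewList()
+//	_ = l.Add(Sideband64k)
+//	_ = l.Add(Agent, "go-git/5.x")
+//	fmt.Println(l.All())    // [side-band-64k agent]
+//	fmt.Println(l.String()) // "side-band-64k agent=go-git/5.x"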
+func (l *List) All() []Capability { + var cs []Capability + for _, key := range l.sort { + cs = append(cs, Capability(key)) + } + + return cs +} + +// String generates the capabilities strings, the capabilities are sorted in +// insertion order +func (l *List) String() string { + var o []string + for _, key := range l.sort { + cap := l.m[Capability(key)] + if len(cap.Values) == 0 { + o = append(o, key) + continue + } + + for _, value := range cap.Values { + o = append(o, fmt.Sprintf("%s=%s", key, value)) + } + } + + return strings.Join(o, " ") +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go new file mode 100644 index 0000000000..ab07ac8f74 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go @@ -0,0 +1,70 @@ +package packp + +import ( + "fmt" +) + +type stateFn func() stateFn + +const ( + // common + hashSize = 40 + + // advrefs + head = "HEAD" + noHead = "capabilities^{}" +) + +var ( + // common + sp = []byte(" ") + eol = []byte("\n") + eq = []byte{'='} + + // advertised-refs + null = []byte("\x00") + peeled = []byte("^{}") + noHeadMark = []byte(" capabilities^{}\x00") + + // upload-request + want = []byte("want ") + shallow = []byte("shallow ") + deepen = []byte("deepen") + deepenCommits = []byte("deepen ") + deepenSince = []byte("deepen-since ") + deepenReference = []byte("deepen-not ") + + // shallow-update + unshallow = []byte("unshallow ") + + // server-response + ack = []byte("ACK") + nak = []byte("NAK") + + // updreq + shallowNoSp = []byte("shallow") +) + +func isFlush(payload []byte) bool { + return len(payload) == 0 +} + +// ErrUnexpectedData represents an unexpected data decoding a message +type ErrUnexpectedData struct { + Msg string + Data []byte +} + +// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and +// the message given +func NewErrUnexpectedData(msg string, data []byte) error { + return &ErrUnexpectedData{Msg: msg, Data: data} +} + +func (err *ErrUnexpectedData) Error() string { + if len(err.Data) == 0 { + return err.Msg + } + + return fmt.Sprintf("%s (%s)", err.Msg, err.Data) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go new file mode 100644 index 0000000000..4950d1d662 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go @@ -0,0 +1,724 @@ +package packp + +/* + +A nice way to trace the real data transmitted and received by git, use: + +GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git +GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git + +Here follows a copy of the current protocol specification at the time of +this writing. + +(Please notice that most http git servers will add a flush-pkt after the +first pkt-line when using HTTP smart.) 
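+
+(As a hedged illustration of how this package maps onto the protocol
+described below, decoding a reference advertisement could look like this,
+where r is assumed to be an io.Reader over the raw response:
+
+	ar := packp.NewAdvRefs()
+	if err := ar.Decode(r); err != nil {
+		// handle error
+	}
+	refs, _ := ar.AllReferences()
+)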
+ + +Documentation Common to Pack and Http Protocols +=============================================== + +ABNF Notation +------------- + +ABNF notation as described by RFC 5234 is used within the protocol documents, +except the following replacement core rules are used: +---- + HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" +---- + +We also define the following common rules: +---- + NUL = %x00 + zero-id = 40*"0" + obj-id = 40*(HEXDIGIT) + + refname = "HEAD" + refname /= "refs/" +---- + +A refname is a hierarchical octet string beginning with "refs/" and +not violating the 'git-check-ref-format' command's validation rules. +More specifically, they: + +. They can include slash `/` for hierarchical (directory) + grouping, but no slash-separated component can begin with a + dot `.`. + +. They must contain at least one `/`. This enforces the presence of a + category like `heads/`, `tags/` etc. but the actual names are not + restricted. + +. They cannot have two consecutive dots `..` anywhere. + +. They cannot have ASCII control characters (i.e. bytes whose + values are lower than \040, or \177 `DEL`), space, tilde `~`, + caret `^`, colon `:`, question-mark `?`, asterisk `*`, + or open bracket `[` anywhere. + +. They cannot end with a slash `/` or a dot `.`. + +. They cannot end with the sequence `.lock`. + +. They cannot contain a sequence `@{`. + +. They cannot contain a `\\`. + + +pkt-line Format +--------------- + +Much (but not all) of the payload is described around pkt-lines. + +A pkt-line is a variable length binary string. The first four bytes +of the line, the pkt-len, indicates the total length of the line, +in hexadecimal. The pkt-len includes the 4 bytes used to contain +the length's hexadecimal representation. + +A pkt-line MAY contain binary data, so implementors MUST ensure +pkt-line parsing/formatting routines are 8-bit clean. + +A non-binary line SHOULD BE terminated by an LF, which if present +MUST be included in the total length. Receivers MUST treat pkt-lines +with non-binary data the same whether or not they contain the trailing +LF (stripping the LF if present, and not complaining when it is +missing). + +The maximum length of a pkt-line's data component is 65516 bytes. +Implementations MUST NOT send pkt-line whose length exceeds 65520 +(65516 bytes of payload + 4 bytes of length data). + +Implementations SHOULD NOT send an empty pkt-line ("0004"). + +A pkt-line with a length field of 0 ("0000"), called a flush-pkt, +is a special case and MUST be handled differently than an empty +pkt-line ("0004"). + +---- + pkt-line = data-pkt / flush-pkt + + data-pkt = pkt-len pkt-payload + pkt-len = 4*(HEXDIG) + pkt-payload = (pkt-len - 4)*(OCTET) + + flush-pkt = "0000" +---- + +Examples (as C-style strings): + +---- + pkt-line actual value + --------------------------------- + "0006a\n" "a\n" + "0005a" "a" + "000bfoobar\n" "foobar\n" + "0004" "" +---- + +Packfile transfer protocols +=========================== + +Git supports transferring data in packfiles over the ssh://, git://, http:// and +file:// transports. There exist two sets of protocols, one for pushing +data from a client to a server and another for fetching data from a +server to a client. The three transports (ssh, git, file) use the same +protocol to transfer data. http is documented in http-protocol.txt. 
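+
+(In this project, the pkt-line framing described above is handled by the
+pktline package; a minimal sketch, where the payload "a\n" is framed as
+"0006a\n" exactly as in the examples table:
+
+	var buf bytes.Buffer
+	e := pktline.NewEncoder(&buf)
+	_ = e.EncodeString("a\n") // writes "0006a\n"
+	_ = e.Flush()             // writes the "0000" flush-pkt
+)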
+ +The processes invoked in the canonical Git implementation are 'upload-pack' +on the server side and 'fetch-pack' on the client side for fetching data; +then 'receive-pack' on the server and 'send-pack' on the client for pushing +data. The protocol functions to have a server tell a client what is +currently on the server, then for the two to negotiate the smallest amount +of data to send in order to fully update one or the other. + +pkt-line Format +--------------- + +The descriptions below build on the pkt-line format described in +protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless +otherwise noted the usual pkt-line LF rules apply: the sender SHOULD +include a LF, but the receiver MUST NOT complain if it is not present. + +Transports +---------- +There are three transports over which the packfile protocol is +initiated. The Git transport is a simple, unauthenticated server that +takes the command (almost always 'upload-pack', though Git +servers can be configured to be globally writable, in which 'receive- +pack' initiation is also allowed) with which the client wishes to +communicate and executes it and connects it to the requesting +process. + +In the SSH transport, the client just runs the 'upload-pack' +or 'receive-pack' process on the server over the SSH protocol and then +communicates with that invoked process over the SSH connection. + +The file:// transport runs the 'upload-pack' or 'receive-pack' +process locally and communicates with it over a pipe. + +Git Transport +------------- + +The Git transport starts off by sending the command and repository +on the wire using the pkt-line format, followed by a NUL byte and a +hostname parameter, terminated by a NUL byte. + + 0032git-upload-pack /project.git\0host=myserver.com\0 + +-- + git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] + request-command = "git-upload-pack" / "git-receive-pack" / + "git-upload-archive" ; case sensitive + pathname = *( %x01-ff ) ; exclude NUL + host-parameter = "host=" hostname [ ":" port ] +-- + +Only host-parameter is allowed in the git-proto-request. Clients +MUST NOT attempt to send additional parameters. It is used for the +git-daemon name based virtual hosting. See --interpolated-path +option to git daemon, with the %H/%CH format characters. + +Basically what the Git client is doing to connect to an 'upload-pack' +process on the server side over the Git protocol is this: + + $ echo -e -n \ + "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | + nc -v example.com 9418 + +If the server refuses the request for some reasons, it could abort +gracefully with an error message. + +---- + error-line = PKT-LINE("ERR" SP explanation-text) +---- + + +SSH Transport +------------- + +Initiating the upload-pack or receive-pack processes over SSH is +executing the binary on the server via SSH remote execution. +It is basically equivalent to running this: + + $ ssh git.example.com "git-upload-pack '/project.git'" + +For a server to support Git pushing and pulling for a given user over +SSH, that user needs to be able to execute one or both of those +commands via the SSH shell that they are provided on login. On some +systems, that shell access is limited to only being able to run those +two commands, or even just one of them. 
+ +In an ssh:// format URI, it's absolute in the URI, so the '/' after +the host name (or port number) is sent as an argument, which is then +read by the remote git-upload-pack exactly as is, so it's effectively +an absolute path in the remote filesystem. + + git clone ssh://user@example.com/project.git + | + v + ssh user@example.com "git-upload-pack '/project.git'" + +In a "user@host:path" format URI, its relative to the user's home +directory, because the Git client will run: + + git clone user@example.com:project.git + | + v + ssh user@example.com "git-upload-pack 'project.git'" + +The exception is if a '~' is used, in which case +we execute it without the leading '/'. + + ssh://user@example.com/~alice/project.git, + | + v + ssh user@example.com "git-upload-pack '~alice/project.git'" + +A few things to remember here: + +- The "command name" is spelled with dash (e.g. git-upload-pack), but + this can be overridden by the client; + +- The repository path is always quoted with single quotes. + +Fetching Data From a Server +--------------------------- + +When one Git repository wants to get data that a second repository +has, the first can 'fetch' from the second. This operation determines +what data the server has that the client does not then streams that +data down to the client in packfile format. + + +Reference Discovery +------------------- + +When the client initially connects the server will immediately respond +with a listing of each reference it has (all branches and tags) along +with the object name that each reference currently points to. + + $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | + nc -v example.com 9418 + 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack + side-band side-band-64k ofs-delta shallow no-progress include-tag + 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration + 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master + 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 + 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 + 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} + 0000 + +The returned response is a pkt-line stream describing each ref and +its current value. The stream MUST be sorted by name according to +the C locale ordering. + +If HEAD is a valid ref, HEAD MUST appear as the first advertised +ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the +advertisement list at all, but other refs may still appear. + +The stream MUST include capability declarations behind a NUL on the +first ref. The peeled value of a ref (that is "ref^{}") MUST be +immediately after the ref itself, if presented. A conforming server +MUST peel the ref if it's an annotated tag. + +---- + advertised-refs = (no-refs / list-of-refs) + *shallow + flush-pkt + + no-refs = PKT-LINE(zero-id SP "capabilities^{}" + NUL capability-list) + + list-of-refs = first-ref *other-ref + first-ref = PKT-LINE(obj-id SP refname + NUL capability-list) + + other-ref = PKT-LINE(other-tip / other-peeled) + other-tip = obj-id SP refname + other-peeled = obj-id SP refname "^{}" + + shallow = PKT-LINE("shallow" SP obj-id) + + capability-list = capability *(SP capability) + capability = 1*(LC_ALPHA / DIGIT / "-" / "_") + LC_ALPHA = %x61-7A +---- + +Server and client MUST use lowercase for obj-id, both MUST treat obj-id +as case-insensitive. + +See protocol-capabilities.txt for a list of allowed server capabilities +and descriptions. 
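+
+(In this project, the capability-list portion of the first ref line is
+handled by capability.List; a brief sketch:
+
+	caps := capability.NewList()
+	_ = caps.Decode([]byte("multi_ack thin-pack side-band side-band-64k"))
+	ok := caps.Supports(capability.Sideband64k) // ok == true
+)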
+ +Packfile Negotiation +-------------------- +After reference and capabilities discovery, the client can decide to +terminate the connection by sending a flush-pkt, telling the server it can +now gracefully terminate, and disconnect, when it does not need any pack +data. This can happen with the ls-remote command, and also can happen when +the client already is up-to-date. + +Otherwise, it enters the negotiation phase, where the client and +server determine what the minimal packfile necessary for transport is, +by telling the server what objects it wants, its shallow objects +(if any), and the maximum commit depth it wants (if any). The client +will also send a list of the capabilities it wants to be in effect, +out of what the server said it could do with the first 'want' line. + +---- + upload-request = want-list + *shallow-line + *1depth-request + flush-pkt + + want-list = first-want + *additional-want + + shallow-line = PKT-LINE("shallow" SP obj-id) + + depth-request = PKT-LINE("deepen" SP depth) / + PKT-LINE("deepen-since" SP timestamp) / + PKT-LINE("deepen-not" SP ref) + + first-want = PKT-LINE("want" SP obj-id SP capability-list) + additional-want = PKT-LINE("want" SP obj-id) + + depth = 1*DIGIT +---- + +Clients MUST send all the obj-ids it wants from the reference +discovery phase as 'want' lines. Clients MUST send at least one +'want' command in the request body. Clients MUST NOT mention an +obj-id in a 'want' command which did not appear in the response +obtained through ref discovery. + +The client MUST write all obj-ids which it only has shallow copies +of (meaning that it does not have the parents of a commit) as +'shallow' lines so that the server is aware of the limitations of +the client's history. + +The client now sends the maximum commit history depth it wants for +this transaction, which is the number of commits it wants from the +tip of the history, if any, as a 'deepen' line. A depth of 0 is the +same as not making a depth request. The client does not want to receive +any commits beyond this depth, nor does it want objects needed only to +complete those commits. Commits whose parents are not received as a +result are defined as shallow and marked as such in the server. This +information is sent back to the client in the next step. + +Once all the 'want's and 'shallow's (and optional 'deepen') are +transferred, clients MUST send a flush-pkt, to tell the server side +that it is done sending the list. + +Otherwise, if the client sent a positive depth request, the server +will determine which commits will and will not be shallow and +send this information to the client. If the client did not request +a positive depth, this step is skipped. + +---- + shallow-update = *shallow-line + *unshallow-line + flush-pkt + + shallow-line = PKT-LINE("shallow" SP obj-id) + + unshallow-line = PKT-LINE("unshallow" SP obj-id) +---- + +If the client has requested a positive depth, the server will compute +the set of commits which are no deeper than the desired depth. The set +of commits start at the client's wants. + +The server writes 'shallow' lines for each +commit whose parents will not be sent as a result. The server writes +an 'unshallow' line for each commit which the client has indicated is +shallow, but is no longer shallow at the currently requested depth +(that is, its parents will now be sent). The server MUST NOT mark +as unshallow anything which the client has not indicated was shallow. 
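+
+(At the wire level, the upload-request grammar above can be produced with
+this project's pkt-line encoder; a sketch, where hash1 and hash2 are
+placeholder hex obj-ids taken from the ref advertisement and w is an
+io.Writer over the connection:
+
+	e := pktline.NewEncoder(w)
+	_ = e.Encodef("want %s multi_ack side-band-64k\n", hash1)
+	_ = e.Encodef("want %s\n", hash2)
+	_ = e.Flush()
+)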
+ +Now the client will send a list of the obj-ids it has using 'have' +lines, so the server can make a packfile that only contains the objects +that the client needs. In multi_ack mode, the canonical implementation +will send up to 32 of these at a time, then will send a flush-pkt. The +canonical implementation will skip ahead and send the next 32 immediately, +so that there is always a block of 32 "in-flight on the wire" at a time. + +---- + upload-haves = have-list + compute-end + + have-list = *have-line + have-line = PKT-LINE("have" SP obj-id) + compute-end = flush-pkt / PKT-LINE("done") +---- + +If the server reads 'have' lines, it then will respond by ACKing any +of the obj-ids the client said it had that the server also has. The +server will ACK obj-ids differently depending on which ack mode is +chosen by the client. + +In multi_ack mode: + + * the server will respond with 'ACK obj-id continue' for any common + commits. + + * once the server has found an acceptable common base commit and is + ready to make a packfile, it will blindly ACK all 'have' obj-ids + back to the client. + + * the server will then send a 'NAK' and then wait for another response + from the client - either a 'done' or another list of 'have' lines. + +In multi_ack_detailed mode: + + * the server will differentiate the ACKs where it is signaling + that it is ready to send data with 'ACK obj-id ready' lines, and + signals the identified common commits with 'ACK obj-id common' lines. + +Without either multi_ack or multi_ack_detailed: + + * upload-pack sends "ACK obj-id" on the first common object it finds. + After that it says nothing until the client gives it a "done". + + * upload-pack sends "NAK" on a flush-pkt if no common object + has been found yet. If one has been found, and thus an ACK + was already sent, it's silent on the flush-pkt. + +After the client has gotten enough ACK responses that it can determine +that the server has enough information to send an efficient packfile +(in the canonical implementation, this is determined when it has received +enough ACKs that it can color everything left in the --date-order queue +as common with the server, or the --date-order queue is empty), or the +client determines that it wants to give up (in the canonical implementation, +this is determined when the client sends 256 'have' lines without getting +any of them ACKed by the server - meaning there is nothing in common and +the server should just send all of its objects), then the client will send +a 'done' command. The 'done' command signals to the server that the client +is ready to receive its packfile data. + +However, the 256 limit *only* turns on in the canonical client +implementation if we have received at least one "ACK %s continue" +during a prior round. This helps to ensure that at least one common +ancestor is found before we give up entirely. + +Once the 'done' line is read from the client, the server will either +send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object +name of the last commit determined to be common. The server only sends +ACK after 'done' if there is at least one common base and multi_ack or +multi_ack_detailed is enabled. The server always sends NAK after 'done' +if there is no common base found. + +Then the server will start sending its packfile data. 
+ +---- + server-response = *ack_multi ack / nak + ack_multi = PKT-LINE("ACK" SP obj-id ack_status) + ack_status = "continue" / "common" / "ready" + ack = PKT-LINE("ACK" SP obj-id) + nak = PKT-LINE("NAK") +---- + +A simple clone may look like this (with no 'have' lines): + +---- + C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ + side-band-64k ofs-delta\n + C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n + C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n + C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n + C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n + C: 0000 + C: 0009done\n + + S: 0008NAK\n + S: [PACKFILE] +---- + +An incremental update (fetch) response might look like this: + +---- + C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ + side-band-64k ofs-delta\n + C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n + C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n + C: 0000 + C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n + C: [30 more have lines] + C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n + C: 0000 + + S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n + S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n + S: 0008NAK\n + + C: 0009done\n + + S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n + S: [PACKFILE] +---- + + +Packfile Data +------------- + +Now that the client and server have finished negotiation about what +the minimal amount of data that needs to be sent to the client is, the server +will construct and send the required data in packfile format. + +See pack-format.txt for what the packfile itself actually looks like. + +If 'side-band' or 'side-band-64k' capabilities have been specified by +the client, the server will send the packfile data multiplexed. + +Each packet starting with the packet-line length of the amount of data +that follows, followed by a single byte specifying the sideband the +following data is coming in on. + +In 'side-band' mode, it will send up to 999 data bytes plus 1 control +code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' +mode it will send up to 65519 data bytes plus 1 control code, for a +total of up to 65520 bytes in a pkt-line. + +The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain +packfile data, sideband '2' will be used for progress information that the +client will generally print to stderr and sideband '3' is used for error +information. + +If no 'side-band' capability was specified, the server will stream the +entire packfile without multiplexing. + + +Pushing Data To a Server +------------------------ + +Pushing data to a server will invoke the 'receive-pack' process on the +server, which will allow the client to tell it which references it should +update and then send all the data the server will need for those new +references to be complete. Once all the data is received and validated, +the server will then update its references to what the client specified. + +Authentication +-------------- + +The protocol itself contains no authentication mechanisms. That is to be +handled by the transport, such as SSH, before the 'receive-pack' process is +invoked. If 'receive-pack' is configured over the Git transport, those +repositories will be writable by anyone who can access that port (9418) as +that transport is unauthenticated. + +Reference Discovery +------------------- + +The reference discovery phase is done nearly the same way as it is in the +fetching protocol. 
Each reference obj-id and name on the server is sent +in packet-line format to the client, followed by a flush-pkt. The only +real difference is that the capability listing is different - the only +possible values are 'report-status', 'delete-refs', 'ofs-delta' and +'push-options'. + +Reference Update Request and Packfile Transfer +---------------------------------------------- + +Once the client knows what references the server is at, it can send a +list of reference update requests. For each reference on the server +that it wants to update, it sends a line listing the obj-id currently on +the server, the obj-id the client would like to update it to and the name +of the reference. + +This list is followed by a flush-pkt. Then the push options are transmitted +one per packet followed by another flush-pkt. After that the packfile that +should contain all the objects that the server will need to complete the new +references will be sent. + +---- + update-request = *shallow ( command-list | push-cert ) [packfile] + + shallow = PKT-LINE("shallow" SP obj-id) + + command-list = PKT-LINE(command NUL capability-list) + *PKT-LINE(command) + flush-pkt + + command = create / delete / update + create = zero-id SP new-id SP name + delete = old-id SP zero-id SP name + update = old-id SP new-id SP name + + old-id = obj-id + new-id = obj-id + + push-cert = PKT-LINE("push-cert" NUL capability-list LF) + PKT-LINE("certificate version 0.1" LF) + PKT-LINE("pusher" SP ident LF) + PKT-LINE("pushee" SP url LF) + PKT-LINE("nonce" SP nonce LF) + PKT-LINE(LF) + *PKT-LINE(command LF) + *PKT-LINE(gpg-signature-lines LF) + PKT-LINE("push-cert-end" LF) + + packfile = "PACK" 28*(OCTET) +---- + +If the receiving end does not support delete-refs, the sending end MUST +NOT ask for delete command. + +If the receiving end does not support push-cert, the sending end +MUST NOT send a push-cert command. When a push-cert command is +sent, command-list MUST NOT be sent; the commands recorded in the +push certificate is used instead. + +The packfile MUST NOT be sent if the only command used is 'delete'. + +A packfile MUST be sent if either create or update command is used, +even if the server already has all the necessary objects. In this +case the client MUST send an empty packfile. The only time this +is likely to happen is if the client is creating +a new branch or a tag that points to an existing obj-id. + +The server will receive the packfile, unpack it, then validate each +reference that is being updated that it hasn't changed while the request +was being processed (the obj-id is still the same as the old-id), and +it will run any update hooks to make sure that the update is acceptable. +If all of that is fine, the server will then update the references. + +Push Certificate +---------------- + +A push certificate begins with a set of header lines. After the +header and an empty line, the protocol commands follow, one per +line. Note that the trailing LF in push-cert PKT-LINEs is _not_ +optional; it must be present. + +Currently, the following header fields are defined: + +`pusher` ident:: + Identify the GPG key in "Human Readable Name " + format. + +`pushee` url:: + The repository URL (anonymized, if the URL contains + authentication material) the user who ran `git push` + intended to push into. + +`nonce` nonce:: + The 'nonce' string the receiving repository asked the + pushing user to include in the certificate, to prevent + replay attacks. 
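+
+Returning to the update-request grammar above: a 'create' command can be
+built and encoded with the packp types vendored later in this change. The
+ref name is made up for illustration and the object id comes from the
+examples in this document:
+
+----
+package main
+
+import (
+	"os"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
+)
+
+func main() {
+	req := packp.NewReferenceUpdateRequest()
+	// create: zero-id as old-id, the new tip, then the ref name.
+	req.Commands = append(req.Commands, &packp.Command{
+		Name: plumbing.ReferenceName("refs/heads/topic"),
+		Old:  plumbing.ZeroHash,
+		New:  plumbing.NewHash("5a3f6be755bbb7deae50065988cbfa1ffa9ab68a"),
+	})
+	// The first command line carries the capability list after a NUL;
+	// the command-list is terminated by a flush-pkt. No packfile is
+	// attached here, so encoding stops after the flush-pkt.
+	_ = req.Encode(os.Stdout)
+}
+----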
+ +The GPG signature lines are a detached signature for the contents +recorded in the push certificate before the signature block begins. +The detached signature is used to certify that the commands were +given by the pusher, who must be the signer. + +Report Status +------------- + +After receiving the pack data from the sender, the receiver sends a +report if 'report-status' capability is in effect. +It is a short listing of what happened in that update. It will first +list the status of the packfile unpacking as either 'unpack ok' or +'unpack [error]'. Then it will list the status for each of the references +that it tried to update. Each line is either 'ok [refname]' if the +update was successful, or 'ng [refname] [error]' if the update was not. + +---- + report-status = unpack-status + 1*(command-status) + flush-pkt + + unpack-status = PKT-LINE("unpack" SP unpack-result) + unpack-result = "ok" / error-msg + + command-status = command-ok / command-fail + command-ok = PKT-LINE("ok" SP refname) + command-fail = PKT-LINE("ng" SP refname SP error-msg) + + error-msg = 1*(OCTECT) ; where not "ok" +---- + +Updates can be unsuccessful for a number of reasons. The reference can have +changed since the reference discovery phase was originally sent, meaning +someone pushed in the meantime. The reference being pushed could be a +non-fast-forward reference and the update hooks or configuration could be +set to not allow that, etc. Also, some references can be updated while others +can be rejected. + +An example client/server communication might look like this: + +---- + S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n + S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n + S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n + S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n + S: 0000 + + C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n + C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n + C: 0000 + C: [PACKDATA] + + S: 000eunpack ok\n + S: 0018ok refs/heads/debug\n + S: 002ang refs/heads/master non-fast-forward\n +---- +*/ diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go new file mode 100644 index 0000000000..e2a0a108b2 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go @@ -0,0 +1,165 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +const ( + ok = "ok" +) + +// ReportStatus is a report status message, as used in the git-receive-pack +// process whenever the 'report-status' capability is negotiated. +type ReportStatus struct { + UnpackStatus string + CommandStatuses []*CommandStatus +} + +// NewReportStatus creates a new ReportStatus message. +func NewReportStatus() *ReportStatus { + return &ReportStatus{} +} + +// Error returns the first error if any. +func (s *ReportStatus) Error() error { + if s.UnpackStatus != ok { + return fmt.Errorf("unpack error: %s", s.UnpackStatus) + } + + for _, s := range s.CommandStatuses { + if err := s.Error(); err != nil { + return err + } + } + + return nil +} + +// Encode writes the report status to a writer. 
+func (s *ReportStatus) Encode(w io.Writer) error { + e := pktline.NewEncoder(w) + if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { + return err + } + + for _, cs := range s.CommandStatuses { + if err := cs.encode(w); err != nil { + return err + } + } + + return e.Flush() +} + +// Decode reads from the given reader and decodes a report-status message. It +// does not read more input than what is needed to fill the report status. +func (s *ReportStatus) Decode(r io.Reader) error { + scan := pktline.NewScanner(r) + if err := s.scanFirstLine(scan); err != nil { + return err + } + + if err := s.decodeReportStatus(scan.Bytes()); err != nil { + return err + } + + flushed := false + for scan.Scan() { + b := scan.Bytes() + if isFlush(b) { + flushed = true + break + } + + if err := s.decodeCommandStatus(b); err != nil { + return err + } + } + + if !flushed { + return fmt.Errorf("missing flush") + } + + return scan.Err() +} + +func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { + if scan.Scan() { + return nil + } + + if scan.Err() != nil { + return scan.Err() + } + + return io.ErrUnexpectedEOF +} + +func (s *ReportStatus) decodeReportStatus(b []byte) error { + if isFlush(b) { + return fmt.Errorf("premature flush") + } + + b = bytes.TrimSuffix(b, eol) + + line := string(b) + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 || fields[0] != "unpack" { + return fmt.Errorf("malformed unpack status: %s", line) + } + + s.UnpackStatus = fields[1] + return nil +} + +func (s *ReportStatus) decodeCommandStatus(b []byte) error { + b = bytes.TrimSuffix(b, eol) + + line := string(b) + fields := strings.SplitN(line, " ", 3) + status := ok + if len(fields) == 3 && fields[0] == "ng" { + status = fields[2] + } else if len(fields) != 2 || fields[0] != "ok" { + return fmt.Errorf("malformed command status: %s", line) + } + + cs := &CommandStatus{ + ReferenceName: plumbing.ReferenceName(fields[1]), + Status: status, + } + s.CommandStatuses = append(s.CommandStatuses, cs) + return nil +} + +// CommandStatus is the status of a reference in a report status. +// See ReportStatus struct. +type CommandStatus struct { + ReferenceName plumbing.ReferenceName + Status string +} + +// Error returns the error, if any. 
+func (s *CommandStatus) Error() error { + if s.Status == ok { + return nil + } + + return fmt.Errorf("command error on %s: %s", + s.ReferenceName.String(), s.Status) +} + +func (s *CommandStatus) encode(w io.Writer) error { + e := pktline.NewEncoder(w) + if s.Error() == nil { + return e.Encodef("ok %s\n", s.ReferenceName.String()) + } + + return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go new file mode 100644 index 0000000000..fe4fe68879 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go @@ -0,0 +1,92 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +const ( + shallowLineLen = 48 + unshallowLineLen = 50 +) + +type ShallowUpdate struct { + Shallows []plumbing.Hash + Unshallows []plumbing.Hash +} + +func (r *ShallowUpdate) Decode(reader io.Reader) error { + s := pktline.NewScanner(reader) + + for s.Scan() { + line := s.Bytes() + line = bytes.TrimSpace(line) + + var err error + switch { + case bytes.HasPrefix(line, shallow): + err = r.decodeShallowLine(line) + case bytes.HasPrefix(line, unshallow): + err = r.decodeUnshallowLine(line) + case bytes.Equal(line, pktline.Flush): + return nil + } + + if err != nil { + return err + } + } + + return s.Err() +} + +func (r *ShallowUpdate) decodeShallowLine(line []byte) error { + hash, err := r.decodeLine(line, shallow, shallowLineLen) + if err != nil { + return err + } + + r.Shallows = append(r.Shallows, hash) + return nil +} + +func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error { + hash, err := r.decodeLine(line, unshallow, unshallowLineLen) + if err != nil { + return err + } + + r.Unshallows = append(r.Unshallows, hash) + return nil +} + +func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) { + if len(line) != expLen { + return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line) + } + + raw := string(line[expLen-40 : expLen]) + return plumbing.NewHash(raw), nil +} + +func (r *ShallowUpdate) Encode(w io.Writer) error { + e := pktline.NewEncoder(w) + + for _, h := range r.Shallows { + if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil { + return err + } + } + + for _, h := range r.Unshallows { + if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil { + return err + } + } + + return e.Flush() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go new file mode 100644 index 0000000000..de5001281f --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go @@ -0,0 +1,33 @@ +package sideband + +// Type sideband type "side-band" or "side-band-64k" +type Type int8 + +const ( + // Sideband legacy sideband type up to 1000-byte messages + Sideband Type = iota + // Sideband64k sideband type up to 65519-byte messages + Sideband64k Type = iota + + // MaxPackedSize for Sideband type + MaxPackedSize = 1000 + // MaxPackedSize64k for Sideband64k type + MaxPackedSize64k = 65520 +) + +// Channel sideband channel +type Channel byte + +// WithPayload encode the payload as a message +func (ch Channel) WithPayload(payload []byte) []byte { + return append([]byte{byte(ch)}, payload...) 
+} + +const ( + // PackData packfile content + PackData Channel = 1 + // ProgressMessage progress messages + ProgressMessage Channel = 2 + // ErrorMessage fatal error message just before stream aborts + ErrorMessage Channel = 3 +) diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go new file mode 100644 index 0000000000..0116f962ef --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go @@ -0,0 +1,148 @@ +package sideband + +import ( + "errors" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded +var ErrMaxPackedExceeded = errors.New("max. packed size exceeded") + +// Progress where the progress information is stored +type Progress interface { + io.Writer +} + +// Demuxer demultiplexes the progress reports and error info interleaved with the +// packfile itself. +// +// A sideband has three different channels the main one, called PackData, contains +// the packfile data; the ErrorMessage channel, that contains server errors; and +// the last one, ProgressMessage channel, containing information about the ongoing +// task happening in the server (optional, can be suppressed sending NoProgress +// or Quiet capabilities to the server) +// +// In order to demultiplex the data stream, method `Read` should be called to +// retrieve the PackData channel, the incoming data from the ProgressMessage is +// written at `Progress` (if any), if any message is retrieved from the +// ErrorMessage channel an error is returned and we can assume that the +// connection has been closed. +type Demuxer struct { + t Type + r io.Reader + s *pktline.Scanner + + max int + pending []byte + + // Progress is where the progress messages are stored + Progress Progress +} + +// NewDemuxer returns a new Demuxer for the given t and read from r +func NewDemuxer(t Type, r io.Reader) *Demuxer { + max := MaxPackedSize64k + if t == Sideband { + max = MaxPackedSize + } + + return &Demuxer{ + t: t, + r: r, + max: max, + s: pktline.NewScanner(r), + } +} + +// Read reads up to len(p) bytes from the PackData channel into p, an error can +// be return if an error happens when reading or if a message is sent in the +// ErrorMessage channel. 
+// +// When a ProgressMessage is read, is not copy to b, instead of this is written +// to the Progress +func (d *Demuxer) Read(b []byte) (n int, err error) { + var read, req int + + req = len(b) + for read < req { + n, err := d.doRead(b[read:req]) + read += n + + if err != nil { + return read, err + } + } + + return read, nil +} + +func (d *Demuxer) doRead(b []byte) (int, error) { + read, err := d.nextPackData() + size := len(read) + wanted := len(b) + + if size > wanted { + d.pending = read[wanted:] + } + + if wanted > size { + wanted = size + } + + size = copy(b, read[:wanted]) + return size, err +} + +func (d *Demuxer) nextPackData() ([]byte, error) { + content := d.getPending() + if len(content) != 0 { + return content, nil + } + + if !d.s.Scan() { + if err := d.s.Err(); err != nil { + return nil, err + } + + return nil, io.EOF + } + + content = d.s.Bytes() + + size := len(content) + if size == 0 { + return nil, nil + } else if size > d.max { + return nil, ErrMaxPackedExceeded + } + + switch Channel(content[0]) { + case PackData: + return content[1:], nil + case ProgressMessage: + if d.Progress != nil { + _, err := d.Progress.Write(content[1:]) + return nil, err + } + case ErrorMessage: + return nil, fmt.Errorf("unexpected error: %s", content[1:]) + default: + return nil, fmt.Errorf("unknown channel %s", content) + } + + return nil, nil +} + +func (d *Demuxer) getPending() (b []byte) { + if len(d.pending) == 0 { + return nil + } + + content := d.pending + d.pending = nil + + return content +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go new file mode 100644 index 0000000000..c5d2429529 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go @@ -0,0 +1,31 @@ +// Package sideband implements a sideband mutiplex/demultiplexer +package sideband + +// If 'side-band' or 'side-band-64k' capabilities have been specified by +// the client, the server will send the packfile data multiplexed. +// +// Either mode indicates that the packfile data will be streamed broken +// up into packets of up to either 1000 bytes in the case of 'side_band', +// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up +// of a leading 4-byte pkt-line length of how much data is in the packet, +// followed by a 1-byte stream code, followed by the actual data. +// +// The stream code can be one of: +// +// 1 - pack data +// 2 - progress messages +// 3 - fatal error message just before stream aborts +// +// The "side-band-64k" capability came about as a way for newer clients +// that can handle much larger packets to request packets that are +// actually crammed nearly full, while maintaining backward compatibility +// for the older clients. +// +// Further, with side-band and its up to 1000-byte messages, it's actually +// 999 bytes of payload and 1 byte for the stream code. With side-band-64k, +// same deal, you have up to 65519 bytes of data and 1 byte for the stream +// code. +// +// The client MUST send only maximum of one of "side-band" and "side- +// band-64k". Server MUST diagnose it as an error if client requests +// both. 
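+//
+// A short sketch tying the two halves of this package together: multiplex
+// two frames with the Muxer, then read back only the packfile channel with
+// the Demuxer while progress goes to stderr:
+//
+//	var muxed bytes.Buffer
+//	mux := sideband.NewMuxer(sideband.Sideband64k, &muxed)
+//	mux.Write([]byte("PACK...."))                                // channel 1
+//	mux.WriteChannel(sideband.ProgressMessage, []byte("done\n")) // channel 2
+//
+//	demux := sideband.NewDemuxer(sideband.Sideband64k, &muxed)
+//	demux.Progress = os.Stderr
+//	pack, _ := ioutil.ReadAll(demux) // yields only the channel-1 bytes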
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go new file mode 100644 index 0000000000..d51ac82695 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go @@ -0,0 +1,65 @@ +package sideband + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +// Muxer multiplex the packfile along with the progress messages and the error +// information. The multiplex is perform using pktline format. +type Muxer struct { + max int + e *pktline.Encoder +} + +const chLen = 1 + +// NewMuxer returns a new Muxer for the given t that writes on w. +// +// If t is equal to `Sideband` the max pack size is set to MaxPackedSize, in any +// other value is given, max pack is set to MaxPackedSize64k, that is the +// maximum length of a line in pktline format. +func NewMuxer(t Type, w io.Writer) *Muxer { + max := MaxPackedSize64k + if t == Sideband { + max = MaxPackedSize + } + + return &Muxer{ + max: max - chLen, + e: pktline.NewEncoder(w), + } +} + +// Write writes p in the PackData channel +func (m *Muxer) Write(p []byte) (int, error) { + return m.WriteChannel(PackData, p) +} + +// WriteChannel writes p in the given channel. This method can be used with any +// channel, but is recommend use it only for the ProgressMessage and +// ErrorMessage channels and use Write for the PackData channel +func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) { + wrote := 0 + size := len(p) + for wrote < size { + n, err := m.doWrite(t, p[wrote:]) + wrote += n + + if err != nil { + return wrote, err + } + } + + return wrote, nil +} + +func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) { + sz := len(p) + if sz > m.max { + sz = m.max + } + + return sz, m.e.Encode(ch.WithPayload(p[:sz])) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go new file mode 100644 index 0000000000..b3a7ee804c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go @@ -0,0 +1,127 @@ +package packp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +const ackLineLen = 44 + +// ServerResponse object acknowledgement from upload-pack service +type ServerResponse struct { + ACKs []plumbing.Hash +} + +// Decode decodes the response into the struct, isMultiACK should be true, if +// the request was done with multi_ack or multi_ack_detailed capabilities. +func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { + // TODO: implement support for multi_ack or multi_ack_detailed responses + if isMultiACK { + return errors.New("multi_ack and multi_ack_detailed are not supported") + } + + s := pktline.NewScanner(reader) + + for s.Scan() { + line := s.Bytes() + + if err := r.decodeLine(line); err != nil { + return err + } + + // we need to detect when the end of a response header and the beginning + // of a packfile header happened, some requests to the git daemon + // produces a duplicate ACK header even when multi_ack is not supported. + stop, err := r.stopReading(reader) + if err != nil { + return err + } + + if stop { + break + } + } + + return s.Err() +} + +// stopReading detects when a valid command such as ACK or NAK is found to be +// read in the buffer without moving the read pointer. 
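+// It peeks up to 7 bytes: enough for either a bare 3-byte command ("ACK" /
+// "NAK") or a 4-byte pkt-line length prefix followed by one. An EOF while
+// peeking means the response is over and packfile data (if any) follows.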
+func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) { + ahead, err := reader.Peek(7) + if err == io.EOF { + return true, nil + } + + if err != nil { + return false, err + } + + if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) { + return false, nil + } + + if len(ahead) == 7 && r.isValidCommand(ahead[4:]) { + return false, nil + } + + return true, nil +} + +func (r *ServerResponse) isValidCommand(b []byte) bool { + commands := [][]byte{ack, nak} + for _, c := range commands { + if bytes.Equal(b, c) { + return true + } + } + + return false +} + +func (r *ServerResponse) decodeLine(line []byte) error { + if len(line) == 0 { + return fmt.Errorf("unexpected flush") + } + + if bytes.Equal(line[0:3], ack) { + return r.decodeACKLine(line) + } + + if bytes.Equal(line[0:3], nak) { + return nil + } + + return fmt.Errorf("unexpected content %q", string(line)) +} + +func (r *ServerResponse) decodeACKLine(line []byte) error { + if len(line) < ackLineLen { + return fmt.Errorf("malformed ACK %q", line) + } + + sp := bytes.Index(line, []byte(" ")) + h := plumbing.NewHash(string(line[sp+1 : sp+41])) + r.ACKs = append(r.ACKs, h) + return nil +} + +// Encode encodes the ServerResponse into a writer. +func (r *ServerResponse) Encode(w io.Writer) error { + if len(r.ACKs) > 1 { + return errors.New("multi_ack and multi_ack_detailed are not supported") + } + + e := pktline.NewEncoder(w) + if len(r.ACKs) == 0 { + return e.Encodef("%s\n", nak) + } + + return e.Encodef("%s %s\n", ack, r.ACKs[0].String()) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go new file mode 100644 index 0000000000..ddec06e996 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go @@ -0,0 +1,168 @@ +package packp + +import ( + "fmt" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" +) + +// UploadRequest values represent the information transmitted on a +// upload-request message. Values from this type are not zero-value +// safe, use the New function instead. +// This is a low level type, use UploadPackRequest instead. +type UploadRequest struct { + Capabilities *capability.List + Wants []plumbing.Hash + Shallows []plumbing.Hash + Depth Depth +} + +// Depth values stores the desired depth of the requested packfile: see +// DepthCommit, DepthSince and DepthReference. +type Depth interface { + isDepth() + IsZero() bool +} + +// DepthCommits values stores the maximum number of requested commits in +// the packfile. Zero means infinite. A negative value will have +// undefined consequences. +type DepthCommits int + +func (d DepthCommits) isDepth() {} + +func (d DepthCommits) IsZero() bool { + return d == 0 +} + +// DepthSince values requests only commits newer than the specified time. +type DepthSince time.Time + +func (d DepthSince) isDepth() {} + +func (d DepthSince) IsZero() bool { + return time.Time(d).IsZero() +} + +// DepthReference requests only commits not to found in the specified reference. +type DepthReference string + +func (d DepthReference) isDepth() {} + +func (d DepthReference) IsZero() bool { + return string(d) == "" +} + +// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be +// used. It has no capabilities, wants or shallows and an infinite depth. Please +// note that to encode an upload-request it has to have at least one wanted hash. 
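+//
+// A typical flow (sketch; the hash is taken from the protocol examples in
+// this change):
+//
+//	req := packp.NewUploadRequest()
+//	req.Wants = append(req.Wants, plumbing.NewHash("74730d410fcb6603ace96f1dc55ea6196122532d"))
+//	err := req.Encode(os.Stdout)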
+func NewUploadRequest() *UploadRequest { + return &UploadRequest{ + Capabilities: capability.NewList(), + Wants: []plumbing.Hash{}, + Shallows: []plumbing.Hash{}, + Depth: DepthCommits(0), + } +} + +// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest +// value, the request capabilities are filled with the most optimal ones, based +// on the adv value (advertised capabilities), the UploadRequest generated it +// has no wants or shallows and an infinite depth. +func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest { + r := NewUploadRequest() + + if adv.Supports(capability.MultiACKDetailed) { + r.Capabilities.Set(capability.MultiACKDetailed) + } else if adv.Supports(capability.MultiACK) { + r.Capabilities.Set(capability.MultiACK) + } + + if adv.Supports(capability.Sideband64k) { + r.Capabilities.Set(capability.Sideband64k) + } else if adv.Supports(capability.Sideband) { + r.Capabilities.Set(capability.Sideband) + } + + if adv.Supports(capability.ThinPack) { + r.Capabilities.Set(capability.ThinPack) + } + + if adv.Supports(capability.OFSDelta) { + r.Capabilities.Set(capability.OFSDelta) + } + + if adv.Supports(capability.Agent) { + r.Capabilities.Set(capability.Agent, capability.DefaultAgent) + } + + return r +} + +// Validate validates the content of UploadRequest, following the next rules: +// - Wants MUST have at least one reference +// - capability.Shallow MUST be present if Shallows is not empty +// - is a non-zero DepthCommits is given capability.Shallow MUST be present +// - is a DepthSince is given capability.Shallow MUST be present +// - is a DepthReference is given capability.DeepenNot MUST be present +// - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k +// - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed +func (req *UploadRequest) Validate() error { + if len(req.Wants) == 0 { + return fmt.Errorf("want can't be empty") + } + + if err := req.validateRequiredCapabilities(); err != nil { + return err + } + + if err := req.validateConflictCapabilities(); err != nil { + return err + } + + return nil +} + +func (req *UploadRequest) validateRequiredCapabilities() error { + msg := "missing capability %s" + + if len(req.Shallows) != 0 && !req.Capabilities.Supports(capability.Shallow) { + return fmt.Errorf(msg, capability.Shallow) + } + + switch req.Depth.(type) { + case DepthCommits: + if req.Depth != DepthCommits(0) { + if !req.Capabilities.Supports(capability.Shallow) { + return fmt.Errorf(msg, capability.Shallow) + } + } + case DepthSince: + if !req.Capabilities.Supports(capability.DeepenSince) { + return fmt.Errorf(msg, capability.DeepenSince) + } + case DepthReference: + if !req.Capabilities.Supports(capability.DeepenNot) { + return fmt.Errorf(msg, capability.DeepenNot) + } + } + + return nil +} + +func (req *UploadRequest) validateConflictCapabilities() error { + msg := "capabilities %s and %s are mutually exclusive" + if req.Capabilities.Supports(capability.Sideband) && + req.Capabilities.Supports(capability.Sideband64k) { + return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k) + } + + if req.Capabilities.Supports(capability.MultiACK) && + req.Capabilities.Supports(capability.MultiACKDetailed) { + return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed) + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go 
b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go new file mode 100644 index 0000000000..895a3bf6dd --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go @@ -0,0 +1,257 @@ +package packp + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "strconv" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +// Decode reads the next upload-request form its input and +// stores it in the UploadRequest. +func (req *UploadRequest) Decode(r io.Reader) error { + d := newUlReqDecoder(r) + return d.Decode(req) +} + +type ulReqDecoder struct { + s *pktline.Scanner // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + err error // sticky error, use the parser.error() method to fill this out + data *UploadRequest // parsed data is stored here +} + +func newUlReqDecoder(r io.Reader) *ulReqDecoder { + return &ulReqDecoder{ + s: pktline.NewScanner(r), + } +} + +func (d *ulReqDecoder) Decode(v *UploadRequest) error { + d.data = v + + for state := d.decodeFirstWant; state != nil; { + state = state() + } + + return d.err +} + +// fills out the parser stiky error +func (d *ulReqDecoder) error(format string, a ...interface{}) { + msg := fmt.Sprintf( + "pkt-line %d: %s", d.nLine, + fmt.Sprintf(format, a...), + ) + + d.err = NewErrUnexpectedData(msg, d.line) +} + +// Reads a new pkt-line from the scanner, makes its payload available as +// p.line and increments p.nLine. A successful invocation returns true, +// otherwise, false is returned and the sticky error is filled out +// accordingly. Trims eols at the end of the payloads. +func (d *ulReqDecoder) nextLine() bool { + d.nLine++ + + if !d.s.Scan() { + if d.err = d.s.Err(); d.err != nil { + return false + } + + d.error("EOF") + return false + } + + d.line = d.s.Bytes() + d.line = bytes.TrimSuffix(d.line, eol) + + return true +} + +// Expected format: want [ capabilities] +func (d *ulReqDecoder) decodeFirstWant() stateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if !bytes.HasPrefix(d.line, want) { + d.error("missing 'want ' prefix") + return nil + } + d.line = bytes.TrimPrefix(d.line, want) + + hash, ok := d.readHash() + if !ok { + return nil + } + d.data.Wants = append(d.data.Wants, hash) + + return d.decodeCaps +} + +func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) { + if len(d.line) < hashSize { + d.err = fmt.Errorf("malformed hash: %v", d.line) + return plumbing.ZeroHash, false + } + + var hash plumbing.Hash + if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil { + d.error("invalid hash text: %s", err) + return plumbing.ZeroHash, false + } + d.line = d.line[hashSize:] + + return hash, true +} + +// Expected format: sp cap1 sp cap2 sp cap3... 
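+// e.g. " multi_ack side-band-64k ofs-delta" (note the leading SP).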
+func (d *ulReqDecoder) decodeCaps() stateFn { + d.line = bytes.TrimPrefix(d.line, sp) + if err := d.data.Capabilities.Decode(d.line); err != nil { + d.error("invalid capabilities: %s", err) + } + + return d.decodeOtherWants +} + +// Expected format: want +func (d *ulReqDecoder) decodeOtherWants() stateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if bytes.HasPrefix(d.line, shallow) { + return d.decodeShallow + } + + if bytes.HasPrefix(d.line, deepen) { + return d.decodeDeepen + } + + if len(d.line) == 0 { + return nil + } + + if !bytes.HasPrefix(d.line, want) { + d.error("unexpected payload while expecting a want: %q", d.line) + return nil + } + d.line = bytes.TrimPrefix(d.line, want) + + hash, ok := d.readHash() + if !ok { + return nil + } + d.data.Wants = append(d.data.Wants, hash) + + return d.decodeOtherWants +} + +// Expected format: shallow +func (d *ulReqDecoder) decodeShallow() stateFn { + if bytes.HasPrefix(d.line, deepen) { + return d.decodeDeepen + } + + if len(d.line) == 0 { + return nil + } + + if !bytes.HasPrefix(d.line, shallow) { + d.error("unexpected payload while expecting a shallow: %q", d.line) + return nil + } + d.line = bytes.TrimPrefix(d.line, shallow) + + hash, ok := d.readHash() + if !ok { + return nil + } + d.data.Shallows = append(d.data.Shallows, hash) + + if ok := d.nextLine(); !ok { + return nil + } + + return d.decodeShallow +} + +// Expected format: deepen / deepen-since
    / deepen-not +func (d *ulReqDecoder) decodeDeepen() stateFn { + if bytes.HasPrefix(d.line, deepenCommits) { + return d.decodeDeepenCommits + } + + if bytes.HasPrefix(d.line, deepenSince) { + return d.decodeDeepenSince + } + + if bytes.HasPrefix(d.line, deepenReference) { + return d.decodeDeepenReference + } + + if len(d.line) == 0 { + return nil + } + + d.error("unexpected deepen specification: %q", d.line) + return nil +} + +func (d *ulReqDecoder) decodeDeepenCommits() stateFn { + d.line = bytes.TrimPrefix(d.line, deepenCommits) + + var n int + if n, d.err = strconv.Atoi(string(d.line)); d.err != nil { + return nil + } + if n < 0 { + d.err = fmt.Errorf("negative depth") + return nil + } + d.data.Depth = DepthCommits(n) + + return d.decodeFlush +} + +func (d *ulReqDecoder) decodeDeepenSince() stateFn { + d.line = bytes.TrimPrefix(d.line, deepenSince) + + var secs int64 + secs, d.err = strconv.ParseInt(string(d.line), 10, 64) + if d.err != nil { + return nil + } + t := time.Unix(secs, 0).UTC() + d.data.Depth = DepthSince(t) + + return d.decodeFlush +} + +func (d *ulReqDecoder) decodeDeepenReference() stateFn { + d.line = bytes.TrimPrefix(d.line, deepenReference) + + d.data.Depth = DepthReference(string(d.line)) + + return d.decodeFlush +} + +func (d *ulReqDecoder) decodeFlush() stateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if len(d.line) != 0 { + d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line) + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go new file mode 100644 index 0000000000..c451e23164 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go @@ -0,0 +1,145 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +// Encode writes the UlReq encoding of u to the stream. +// +// All the payloads will end with a newline character. Wants and +// shallows are sorted alphabetically. A depth of 0 means no depth +// request is sent. 
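+//
+// For instance, a single want plus a depth of 1 encodes as:
+//
+//	0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
+//	000ddeepen 1\n
+//	0000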
+func (req *UploadRequest) Encode(w io.Writer) error { + e := newUlReqEncoder(w) + return e.Encode(req) +} + +type ulReqEncoder struct { + pe *pktline.Encoder // where to write the encoded data + data *UploadRequest // the data to encode + err error // sticky error +} + +func newUlReqEncoder(w io.Writer) *ulReqEncoder { + return &ulReqEncoder{ + pe: pktline.NewEncoder(w), + } +} + +func (e *ulReqEncoder) Encode(v *UploadRequest) error { + e.data = v + + if len(v.Wants) == 0 { + return fmt.Errorf("empty wants provided") + } + + plumbing.HashesSort(e.data.Wants) + for state := e.encodeFirstWant; state != nil; { + state = state() + } + + return e.err +} + +func (e *ulReqEncoder) encodeFirstWant() stateFn { + var err error + if e.data.Capabilities.IsEmpty() { + err = e.pe.Encodef("want %s\n", e.data.Wants[0]) + } else { + err = e.pe.Encodef( + "want %s %s\n", + e.data.Wants[0], + e.data.Capabilities.String(), + ) + } + + if err != nil { + e.err = fmt.Errorf("encoding first want line: %s", err) + return nil + } + + return e.encodeAdditionalWants +} + +func (e *ulReqEncoder) encodeAdditionalWants() stateFn { + last := e.data.Wants[0] + for _, w := range e.data.Wants[1:] { + if bytes.Equal(last[:], w[:]) { + continue + } + + if err := e.pe.Encodef("want %s\n", w); err != nil { + e.err = fmt.Errorf("encoding want %q: %s", w, err) + return nil + } + + last = w + } + + return e.encodeShallows +} + +func (e *ulReqEncoder) encodeShallows() stateFn { + plumbing.HashesSort(e.data.Shallows) + + var last plumbing.Hash + for _, s := range e.data.Shallows { + if bytes.Equal(last[:], s[:]) { + continue + } + + if err := e.pe.Encodef("shallow %s\n", s); err != nil { + e.err = fmt.Errorf("encoding shallow %q: %s", s, err) + return nil + } + + last = s + } + + return e.encodeDepth +} + +func (e *ulReqEncoder) encodeDepth() stateFn { + switch depth := e.data.Depth.(type) { + case DepthCommits: + if depth != 0 { + commits := int(depth) + if err := e.pe.Encodef("deepen %d\n", commits); err != nil { + e.err = fmt.Errorf("encoding depth %d: %s", depth, err) + return nil + } + } + case DepthSince: + when := time.Time(depth).UTC() + if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil { + e.err = fmt.Errorf("encoding depth %s: %s", when, err) + return nil + } + case DepthReference: + reference := string(depth) + if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil { + e.err = fmt.Errorf("encoding depth %s: %s", reference, err) + return nil + } + default: + e.err = fmt.Errorf("unsupported depth type") + return nil + } + + return e.encodeFlush +} + +func (e *ulReqEncoder) encodeFlush() stateFn { + if err := e.pe.Flush(); err != nil { + e.err = fmt.Errorf("encoding flush-pkt: %s", err) + return nil + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go new file mode 100644 index 0000000000..4d927d8b8e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go @@ -0,0 +1,122 @@ +package packp + +import ( + "errors" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" +) + +var ( + ErrEmptyCommands = errors.New("commands cannot be empty") + ErrMalformedCommand = errors.New("malformed command") +) + +// ReferenceUpdateRequest values represent reference upload requests. 
+// Values from this type are not zero-value safe, use the New function instead. +type ReferenceUpdateRequest struct { + Capabilities *capability.List + Commands []*Command + Shallow *plumbing.Hash + // Packfile contains an optional packfile reader. + Packfile io.ReadCloser + + // Progress receives sideband progress messages from the server + Progress sideband.Progress +} + +// New returns a pointer to a new ReferenceUpdateRequest value. +func NewReferenceUpdateRequest() *ReferenceUpdateRequest { + return &ReferenceUpdateRequest{ + // TODO: Add support for push-cert + Capabilities: capability.NewList(), + Commands: nil, + } +} + +// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new +// ReferenceUpdateRequest value, the request capabilities are filled with the +// most optimal ones, based on the adv value (advertised capabilities), the +// ReferenceUpdateRequest contains no commands +// +// It does set the following capabilities: +// - agent +// - report-status +// - ofs-delta +// - ref-delta +// - delete-refs +// It leaves up to the user to add the following capabilities later: +// - atomic +// - ofs-delta +// - side-band +// - side-band-64k +// - quiet +// - push-cert +func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest { + r := NewReferenceUpdateRequest() + + if adv.Supports(capability.Agent) { + r.Capabilities.Set(capability.Agent, capability.DefaultAgent) + } + + if adv.Supports(capability.ReportStatus) { + r.Capabilities.Set(capability.ReportStatus) + } + + return r +} + +func (req *ReferenceUpdateRequest) validate() error { + if len(req.Commands) == 0 { + return ErrEmptyCommands + } + + for _, c := range req.Commands { + if err := c.validate(); err != nil { + return err + } + } + + return nil +} + +type Action string + +const ( + Create Action = "create" + Update = "update" + Delete = "delete" + Invalid = "invalid" +) + +type Command struct { + Name plumbing.ReferenceName + Old plumbing.Hash + New plumbing.Hash +} + +func (c *Command) Action() Action { + if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash { + return Invalid + } + + if c.Old == plumbing.ZeroHash { + return Create + } + + if c.New == plumbing.ZeroHash { + return Delete + } + + return Update +} + +func (c *Command) validate() error { + if c.Action() == Invalid { + return ErrMalformedCommand + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go new file mode 100644 index 0000000000..2c9843a566 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go @@ -0,0 +1,250 @@ +package packp + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" +) + +var ( + shallowLineLength = len(shallow) + hashSize + minCommandLength = hashSize*2 + 2 + 1 + minCommandAndCapsLength = minCommandLength + 1 +) + +var ( + ErrEmpty = errors.New("empty update-request message") + errNoCommands = errors.New("unexpected EOF before any command") + errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found") +) + +func errMalformedRequest(reason string) error { + return fmt.Errorf("malformed request: %s", reason) +} + +func errInvalidHashSize(got int) error { + return fmt.Errorf("invalid hash size: expected %d, got %d", + hashSize, got) +} + +func errInvalidHash(err error) error 
{ + return fmt.Errorf("invalid hash: %s", err.Error()) +} + +func errInvalidShallowLineLength(got int) error { + return errMalformedRequest(fmt.Sprintf( + "invalid shallow line length: expected %d, got %d", + shallowLineLength, got)) +} + +func errInvalidCommandCapabilitiesLineLength(got int) error { + return errMalformedRequest(fmt.Sprintf( + "invalid command and capabilities line length: expected at least %d, got %d", + minCommandAndCapsLength, got)) +} + +func errInvalidCommandLineLength(got int) error { + return errMalformedRequest(fmt.Sprintf( + "invalid command line length: expected at least %d, got %d", + minCommandLength, got)) +} + +func errInvalidShallowObjId(err error) error { + return errMalformedRequest( + fmt.Sprintf("invalid shallow object id: %s", err.Error())) +} + +func errInvalidOldObjId(err error) error { + return errMalformedRequest( + fmt.Sprintf("invalid old object id: %s", err.Error())) +} + +func errInvalidNewObjId(err error) error { + return errMalformedRequest( + fmt.Sprintf("invalid new object id: %s", err.Error())) +} + +func errMalformedCommand(err error) error { + return errMalformedRequest(fmt.Sprintf( + "malformed command: %s", err.Error())) +} + +// Decode reads the next update-request message form the reader and wr +func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { + var rc io.ReadCloser + var ok bool + rc, ok = r.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(r) + } + + d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} + return d.Decode(req) +} + +type updReqDecoder struct { + r io.ReadCloser + s *pktline.Scanner + req *ReferenceUpdateRequest +} + +func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { + d.req = req + funcs := []func() error{ + d.scanLine, + d.decodeShallow, + d.decodeCommandAndCapabilities, + d.decodeCommands, + d.setPackfile, + req.validate, + } + + for _, f := range funcs { + if err := f(); err != nil { + return err + } + } + + return nil +} + +func (d *updReqDecoder) scanLine() error { + if ok := d.s.Scan(); !ok { + return d.scanErrorOr(ErrEmpty) + } + + return nil +} + +func (d *updReqDecoder) decodeShallow() error { + b := d.s.Bytes() + + if !bytes.HasPrefix(b, shallowNoSp) { + return nil + } + + if len(b) != shallowLineLength { + return errInvalidShallowLineLength(len(b)) + } + + h, err := parseHash(string(b[len(shallow):])) + if err != nil { + return errInvalidShallowObjId(err) + } + + if ok := d.s.Scan(); !ok { + return d.scanErrorOr(errNoCommands) + } + + d.req.Shallow = &h + + return nil +} + +func (d *updReqDecoder) decodeCommands() error { + for { + b := d.s.Bytes() + if bytes.Equal(b, pktline.Flush) { + return nil + } + + c, err := parseCommand(b) + if err != nil { + return err + } + + d.req.Commands = append(d.req.Commands, c) + + if ok := d.s.Scan(); !ok { + return d.s.Err() + } + } +} + +func (d *updReqDecoder) decodeCommandAndCapabilities() error { + b := d.s.Bytes() + i := bytes.IndexByte(b, 0) + if i == -1 { + return errMissingCapabilitiesDelimiter + } + + if len(b) < minCommandAndCapsLength { + return errInvalidCommandCapabilitiesLineLength(len(b)) + } + + cmd, err := parseCommand(b[:i]) + if err != nil { + return err + } + + d.req.Commands = append(d.req.Commands, cmd) + + if err := d.req.Capabilities.Decode(b[i+1:]); err != nil { + return err + } + + if err := d.scanLine(); err != nil { + return err + } + + return nil +} + +func (d *updReqDecoder) setPackfile() error { + d.req.Packfile = d.r + + return nil +} + +func parseCommand(b []byte) (*Command, error) { + if len(b) < 
minCommandLength { + return nil, errInvalidCommandLineLength(len(b)) + } + + var ( + os, ns string + n plumbing.ReferenceName + ) + if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil { + return nil, errMalformedCommand(err) + } + + oh, err := parseHash(os) + if err != nil { + return nil, errInvalidOldObjId(err) + } + + nh, err := parseHash(ns) + if err != nil { + return nil, errInvalidNewObjId(err) + } + + return &Command{Old: oh, New: nh, Name: n}, nil +} + +func parseHash(s string) (plumbing.Hash, error) { + if len(s) != hashSize { + return plumbing.ZeroHash, errInvalidHashSize(len(s)) + } + + if _, err := hex.DecodeString(s); err != nil { + return plumbing.ZeroHash, errInvalidHash(err) + } + + h := plumbing.NewHash(s) + return h, nil +} + +func (d *updReqDecoder) scanErrorOr(origErr error) error { + if err := d.s.Err(); err != nil { + return err + } + + return origErr +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go new file mode 100644 index 0000000000..2545e935e6 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go @@ -0,0 +1,75 @@ +package packp + +import ( + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" +) + +var ( + zeroHashString = plumbing.ZeroHash.String() +) + +// Encode writes the ReferenceUpdateRequest encoding to the stream. +func (req *ReferenceUpdateRequest) Encode(w io.Writer) error { + if err := req.validate(); err != nil { + return err + } + + e := pktline.NewEncoder(w) + + if err := req.encodeShallow(e, req.Shallow); err != nil { + return err + } + + if err := req.encodeCommands(e, req.Commands, req.Capabilities); err != nil { + return err + } + + if req.Packfile != nil { + if _, err := io.Copy(w, req.Packfile); err != nil { + return err + } + + return req.Packfile.Close() + } + + return nil +} + +func (req *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder, + h *plumbing.Hash) error { + + if h == nil { + return nil + } + + objId := []byte(h.String()) + return e.Encodef("%s%s", shallow, objId) +} + +func (req *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder, + cmds []*Command, cap *capability.List) error { + + if err := e.Encodef("%s\x00%s", + formatCommand(cmds[0]), cap.String()); err != nil { + return err + } + + for _, cmd := range cmds[1:] { + if err := e.Encodef(formatCommand(cmd)); err != nil { + return err + } + } + + return e.Flush() +} + +func formatCommand(cmd *Command) string { + o := cmd.Old.String() + n := cmd.New.String() + return fmt.Sprintf("%s %s %s", o, n, cmd.Name) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go new file mode 100644 index 0000000000..de2206b3fc --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go @@ -0,0 +1,98 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" +) + +// UploadPackRequest represents a upload-pack request. +// Zero-value is not safe, use NewUploadPackRequest instead. 
+type UploadPackRequest struct { + UploadRequest + UploadHaves +} + +// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer. +func NewUploadPackRequest() *UploadPackRequest { + ur := NewUploadRequest() + return &UploadPackRequest{ + UploadHaves: UploadHaves{}, + UploadRequest: *ur, + } +} + +// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and +// returns a pointer. The request capabilities are filled with the most optimal +// ones, based on the adv value (advertised capabilities), the UploadPackRequest +// it has no wants, haves or shallows and an infinite depth +func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest { + ur := NewUploadRequestFromCapabilities(adv) + return &UploadPackRequest{ + UploadHaves: UploadHaves{}, + UploadRequest: *ur, + } +} + +// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants +// length is zero +func (r *UploadPackRequest) IsEmpty() bool { + return isSubset(r.Wants, r.Haves) +} + +func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { + for _, h := range needle { + found := false + for _, oh := range haystack { + if h == oh { + found = true + break + } + } + + if !found { + return false + } + } + + return true +} + +// UploadHaves is a message to signal the references that a client has in a +// upload-pack. Do not use this directly. Use UploadPackRequest request instead. +type UploadHaves struct { + Haves []plumbing.Hash +} + +// Encode encodes the UploadHaves into the Writer. If flush is true, a flush +// command will be encoded at the end of the writer content. +func (u *UploadHaves) Encode(w io.Writer, flush bool) error { + e := pktline.NewEncoder(w) + + plumbing.HashesSort(u.Haves) + + var last plumbing.Hash + for _, have := range u.Haves { + if bytes.Equal(last[:], have[:]) { + continue + } + + if err := e.Encodef("have %s\n", have); err != nil { + return fmt.Errorf("sending haves for %q: %s", have, err) + } + + last = have + } + + if flush && len(u.Haves) != 0 { + if err := e.Flush(); err != nil { + return fmt.Errorf("sending flush-pkt after haves: %s", err) + } + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go new file mode 100644 index 0000000000..a9a7192ea5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go @@ -0,0 +1,109 @@ +package packp + +import ( + "errors" + "io" + + "bufio" + + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// ErrUploadPackResponseNotDecoded is returned if Read is called without +// decoding first +var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded") + +// UploadPackResponse contains all the information responded by the upload-pack +// service, the response implements io.ReadCloser that allows to read the +// packfile directly from it. +type UploadPackResponse struct { + ShallowUpdate + ServerResponse + + r io.ReadCloser + isShallow bool + isMultiACK bool + isOk bool +} + +// NewUploadPackResponse create a new UploadPackResponse instance, the request +// being responded by the response is required. 
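+//
+// Typical use (sketch; r is any io.ReadCloser carrying the server reply):
+//
+//	resp := packp.NewUploadPackResponse(req)
+//	if err := resp.Decode(r); err != nil {
+//		return err
+//	}
+//	pack, err := ioutil.ReadAll(resp) // the packfile bytes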
+func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse { + isShallow := !req.Depth.IsZero() + isMultiACK := req.Capabilities.Supports(capability.MultiACK) || + req.Capabilities.Supports(capability.MultiACKDetailed) + + return &UploadPackResponse{ + isShallow: isShallow, + isMultiACK: isMultiACK, + } +} + +// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance, +// and sets its packfile reader. +func NewUploadPackResponseWithPackfile(req *UploadPackRequest, + pf io.ReadCloser) *UploadPackResponse { + + r := NewUploadPackResponse(req) + r.r = pf + return r +} + +// Decode decodes all the responses sent by upload-pack service into the struct +// and prepares it to read the packfile using the Read method +func (r *UploadPackResponse) Decode(reader io.ReadCloser) error { + buf := bufio.NewReader(reader) + + if r.isShallow { + if err := r.ShallowUpdate.Decode(buf); err != nil { + return err + } + } + + if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil { + return err + } + + // now the reader is ready to read the packfile content + r.r = ioutil.NewReadCloser(buf, reader) + + return nil +} + +// Encode encodes an UploadPackResponse. +func (r *UploadPackResponse) Encode(w io.Writer) (err error) { + if r.isShallow { + if err := r.ShallowUpdate.Encode(w); err != nil { + return err + } + } + + if err := r.ServerResponse.Encode(w); err != nil { + return err + } + + defer ioutil.CheckClose(r.r, &err) + _, err = io.Copy(w, r.r) + return err +} + +// Read reads the packfile data, if the request was done with any Sideband +// capability the content read should be demultiplexed. If the methods wasn't +// called before the ErrUploadPackResponseNotDecoded will be return +func (r *UploadPackResponse) Read(p []byte) (int, error) { + if r.r == nil { + return 0, ErrUploadPackResponseNotDecoded + } + + return r.r.Read(p) +} + +// Close the underlying reader, if any +func (r *UploadPackResponse) Close() error { + if r.r == nil { + return nil + } + + return r.r.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go new file mode 100644 index 0000000000..08e908f1f3 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/reference.go @@ -0,0 +1,209 @@ +package plumbing + +import ( + "errors" + "fmt" + "strings" +) + +const ( + refPrefix = "refs/" + refHeadPrefix = refPrefix + "heads/" + refTagPrefix = refPrefix + "tags/" + refRemotePrefix = refPrefix + "remotes/" + refNotePrefix = refPrefix + "notes/" + symrefPrefix = "ref: " +) + +// RefRevParseRules are a set of rules to parse references into short names. +// These are the same rules as used by git in shorten_unambiguous_ref. 
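+// For example, "refs/remotes/origin/master" shortens to "origin/master".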
+// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417
+var RefRevParseRules = []string{
+	"refs/%s",
+	"refs/tags/%s",
+	"refs/heads/%s",
+	"refs/remotes/%s",
+	"refs/remotes/%s/HEAD",
+}
+
+var (
+	ErrReferenceNotFound = errors.New("reference not found")
+)
+
+// ReferenceType describes the type of a reference.
+type ReferenceType int8
+
+const (
+	InvalidReference  ReferenceType = 0
+	HashReference     ReferenceType = 1
+	SymbolicReference ReferenceType = 2
+)
+
+func (r ReferenceType) String() string {
+	switch r {
+	case InvalidReference:
+		return "invalid-reference"
+	case HashReference:
+		return "hash-reference"
+	case SymbolicReference:
+		return "symbolic-reference"
+	}
+
+	return ""
+}
+
+// ReferenceName is the name of a reference.
+type ReferenceName string
+
+// NewBranchReferenceName returns a reference name describing a branch based on
+// its short name.
+func NewBranchReferenceName(name string) ReferenceName {
+	return ReferenceName(refHeadPrefix + name)
+}
+
+// NewNoteReferenceName returns a reference name describing a note based on its
+// short name.
+func NewNoteReferenceName(name string) ReferenceName {
+	return ReferenceName(refNotePrefix + name)
+}
+
+// NewRemoteReferenceName returns a reference name describing a remote branch
+// based on its short name and the remote name.
+func NewRemoteReferenceName(remote, name string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
+}
+
+// NewRemoteHEADReferenceName returns a reference name describing the HEAD
+// branch of a remote.
+func NewRemoteHEADReferenceName(remote string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
+}
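+
+// For illustration, the constructors above expand short names into fully
+// qualified reference names, and Short (defined below) reverses the
+// expansion; "master" is only a placeholder branch name:
+//
+//	n := NewBranchReferenceName("master") // "refs/heads/master"
+//	n.IsBranch()                          // true
+//	n.Short()                             // "master"
+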
+// NewTagReferenceName returns a reference name describing a tag based on its
+// short name.
+func NewTagReferenceName(name string) ReferenceName {
+	return ReferenceName(refTagPrefix + name)
+}
+
+// IsBranch checks whether a reference is a branch.
+func (r ReferenceName) IsBranch() bool {
+	return strings.HasPrefix(string(r), refHeadPrefix)
+}
+
+// IsNote checks whether a reference is a note.
+func (r ReferenceName) IsNote() bool {
+	return strings.HasPrefix(string(r), refNotePrefix)
+}
+
+// IsRemote checks whether a reference is a remote.
+func (r ReferenceName) IsRemote() bool {
+	return strings.HasPrefix(string(r), refRemotePrefix)
+}
+
+// IsTag checks whether a reference is a tag.
+func (r ReferenceName) IsTag() bool {
+	return strings.HasPrefix(string(r), refTagPrefix)
+}
+
+func (r ReferenceName) String() string {
+	return string(r)
+}
+
+// Short returns the short name of a ReferenceName.
+func (r ReferenceName) Short() string {
+	s := string(r)
+	res := s
+	for _, format := range RefRevParseRules {
+		_, err := fmt.Sscanf(s, format, &res)
+		if err == nil {
+			continue
+		}
+	}
+
+	return res
+}
+
+const (
+	HEAD   ReferenceName = "HEAD"
+	Master ReferenceName = "refs/heads/master"
+)
+
+// Reference is a representation of a git reference.
+type Reference struct {
+	t      ReferenceType
+	n      ReferenceName
+	h      Hash
+	target ReferenceName
+}
+
+// NewReferenceFromStrings creates a reference from name and target as
+// strings; the resulting reference can be a SymbolicReference or a
+// HashReference, based on the target provided.
+func NewReferenceFromStrings(name, target string) *Reference {
+	n := ReferenceName(name)
+
+	if strings.HasPrefix(target, symrefPrefix) {
+		target := ReferenceName(target[len(symrefPrefix):])
+		return NewSymbolicReference(n, target)
+	}
+
+	return NewHashReference(n, NewHash(target))
+}
+
+// NewSymbolicReference creates a new SymbolicReference reference.
+func NewSymbolicReference(n, target ReferenceName) *Reference {
+	return &Reference{
+		t:      SymbolicReference,
+		n:      n,
+		target: target,
+	}
+}
+
+// NewHashReference creates a new HashReference reference.
+func NewHashReference(n ReferenceName, h Hash) *Reference {
+	return &Reference{
+		t: HashReference,
+		n: n,
+		h: h,
+	}
+}
+
+// Type returns the type of a reference.
+func (r *Reference) Type() ReferenceType {
+	return r.t
+}
+
+// Name returns the name of a reference.
+func (r *Reference) Name() ReferenceName {
+	return r.n
+}
+
+// Hash returns the hash of a hash reference.
+func (r *Reference) Hash() Hash {
+	return r.h
+}
+
+// Target returns the target of a symbolic reference.
+func (r *Reference) Target() ReferenceName {
+	return r.target
+}
+
+// Strings dumps a reference as a [2]string.
+func (r *Reference) Strings() [2]string {
+	var o [2]string
+	o[0] = r.Name().String()
+
+	switch r.Type() {
+	case HashReference:
+		o[1] = r.Hash().String()
+	case SymbolicReference:
+		o[1] = symrefPrefix + r.Target().String()
+	}
+
+	return o
+}
+
+func (r *Reference) String() string {
+	s := r.Strings()
+	return fmt.Sprintf("%s %s", s[1], s[0])
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/revision.go b/vendor/github.com/go-git/go-git/v5/plumbing/revision.go
new file mode 100644
index 0000000000..5f053b200c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/revision.go
@@ -0,0 +1,11 @@
+package plumbing
+
+// Revision represents a git revision. To get more details about git
+// revisions, please check the git manual page:
+// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html
+type Revision string
+
+func (r Revision) String() string {
+	return string(r)
+}
diff --git
a/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go b/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go
new file mode 100644
index 0000000000..b9109870f0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go
@@ -0,0 +1,230 @@
+// Package revlist provides support to access the ancestors of commits, in a
+// similar way as the git-rev-list command.
+package revlist
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/filemode"
+	"github.com/go-git/go-git/v5/plumbing/object"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+// Objects applies a complementary set. It gets all the hashes of the objects
+// reachable from the given objects. The ignore parameter lists object hashes
+// to be excluded from the result. All of those objects must be accessible
+// from the object storer.
+func Objects(
+	s storer.EncodedObjectStorer,
+	objs,
+	ignore []plumbing.Hash,
+) ([]plumbing.Hash, error) {
+	return ObjectsWithStorageForIgnores(s, s, objs, ignore)
+}
+
+// ObjectsWithStorageForIgnores is the same as Objects, but a
+// secondary storage layer can be provided, to be used for finding the
+// full set of objects to be ignored while finding the reachable
+// objects. This is useful when the main `s` storage layer is slow
+// and/or remote, while the ignore list is available somewhere local.
+func ObjectsWithStorageForIgnores(
+	s, ignoreStore storer.EncodedObjectStorer,
+	objs,
+	ignore []plumbing.Hash,
+) ([]plumbing.Hash, error) {
+	ignore, err := objects(ignoreStore, ignore, nil, true)
+	if err != nil {
+		return nil, err
+	}
+
+	return objects(s, objs, ignore, false)
+}
+
+func objects(
+	s storer.EncodedObjectStorer,
+	objects,
+	ignore []plumbing.Hash,
+	allowMissingObjects bool,
+) ([]plumbing.Hash, error) {
+	seen := hashListToSet(ignore)
+	result := make(map[plumbing.Hash]bool)
+	visited := make(map[plumbing.Hash]bool)
+
+	walkerFunc := func(h plumbing.Hash) {
+		if !seen[h] {
+			result[h] = true
+			seen[h] = true
+		}
+	}
+
+	for _, h := range objects {
+		if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil {
+			if allowMissingObjects && err == plumbing.ErrObjectNotFound {
+				continue
+			}
+
+			return nil, err
+		}
+	}
+
+	return hashSetToList(result), nil
+}
+
+// processObject obtains the object using the hash and processes it depending
+// on its type.
+func processObject(
+	s storer.EncodedObjectStorer,
+	h plumbing.Hash,
+	seen map[plumbing.Hash]bool,
+	visited map[plumbing.Hash]bool,
+	ignore []plumbing.Hash,
+	walkerFunc func(h plumbing.Hash),
+) error {
+	if seen[h] {
+		return nil
+	}
+
+	o, err := s.EncodedObject(plumbing.AnyObject, h)
+	if err != nil {
+		return err
+	}
+
+	do, err := object.DecodeObject(s, o)
+	if err != nil {
+		return err
+	}
+
+	switch do := do.(type) {
+	case *object.Commit:
+		return reachableObjects(do, seen, visited, ignore, walkerFunc)
+	case *object.Tree:
+		return iterateCommitTrees(seen, do, walkerFunc)
+	case *object.Tag:
+		walkerFunc(do.Hash)
+		return processObject(s, do.Target, seen, visited, ignore, walkerFunc)
+	case *object.Blob:
+		walkerFunc(do.Hash)
+	default:
+		return fmt.Errorf("object type not valid: %s. "+
+			"Object reference: %s", o.Type(), o.Hash())
+	}
+
+	return nil
+}
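+
+// For illustration, a packfile negotiation can use Objects to compute the
+// hashes reachable from the client's wants but not from its haves; `s`,
+// `wants` and `haves` are placeholders here:
+//
+//	hashes, err := Objects(s, wants, haves)
+//	if err != nil {
+//		return err
+//	}
+//	// hashes now holds every commit, tree and blob that must be sent
+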
+// reachableObjects returns, via the callback function, all the objects
+// reachable from the specified commit. To avoid iterating over already-seen
+// commits, if a commit hash is in the 'seen' set, its trees and blob objects
+// will not be iterated.
+func reachableObjects(
+	commit *object.Commit,
+	seen map[plumbing.Hash]bool,
+	visited map[plumbing.Hash]bool,
+	ignore []plumbing.Hash,
+	cb func(h plumbing.Hash),
+) error {
+	i := object.NewCommitPreorderIter(commit, seen, ignore)
+	pending := make(map[plumbing.Hash]bool)
+	addPendingParents(pending, visited, commit)
+	for {
+		commit, err := i.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if pending[commit.Hash] {
+			delete(pending, commit.Hash)
+		}
+
+		addPendingParents(pending, visited, commit)
+
+		if visited[commit.Hash] && len(pending) == 0 {
+			break
+		}
+
+		if seen[commit.Hash] {
+			continue
+		}
+
+		cb(commit.Hash)
+
+		tree, err := commit.Tree()
+		if err != nil {
+			return err
+		}
+
+		if err := iterateCommitTrees(seen, tree, cb); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) {
+	for _, p := range commit.ParentHashes {
+		if !visited[p] {
+			pending[p] = true
+		}
+	}
+}
+
+// iterateCommitTrees iterates over all the trees reachable from the given
+// tree.
+func iterateCommitTrees(
+	seen map[plumbing.Hash]bool,
+	tree *object.Tree,
+	cb func(h plumbing.Hash),
+) error {
+	if seen[tree.Hash] {
+		return nil
+	}
+
+	cb(tree.Hash)
+
+	treeWalker := object.NewTreeWalker(tree, true, seen)
+
+	for {
+		_, e, err := treeWalker.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		if e.Mode == filemode.Submodule {
+			continue
+		}
+
+		if seen[e.Hash] {
+			continue
+		}
+
+		cb(e.Hash)
+	}
+
+	return nil
+}
+
+func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash {
+	var result []plumbing.Hash
+	for key := range hashes {
+		result = append(result, key)
+	}
+
+	return result
+}
+
+func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool {
+	result := make(map[plumbing.Hash]bool)
+	for _, h := range hashes {
+		result[h] = true
+	}
+
+	return result
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go
new file mode 100644
index 0000000000..4d4f179c61
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go
@@ -0,0 +1,2 @@
+// Package storer defines the interfaces to store objects, references, etc.
+package storer
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go
new file mode 100644
index 0000000000..33113949b3
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go
@@ -0,0 +1,9 @@
+package storer
+
+import "github.com/go-git/go-git/v5/plumbing/format/index"
+
+// IndexStorer is a generic storage of index.Index.
+type IndexStorer interface {
+	SetIndex(*index.Index) error
+	Index() (*index.Index, error)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go
new file mode 100644
index 0000000000..dfe309db16
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go
@@ -0,0 +1,288 @@
+package storer
+
+import (
+	"errors"
+	"io"
+	"time"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+var (
+	// ErrStop is used to stop a ForEach function in an Iter.
+	ErrStop = errors.New("stop iter")
+)
+
+// EncodedObjectStorer is a generic storage of objects.
+type EncodedObjectStorer interface {
+	// NewEncodedObject returns a new plumbing.EncodedObject, the real type
+	// of the object can be a custom implementation or the default one,
+	// plumbing.MemoryObject.
+	NewEncodedObject() plumbing.EncodedObject
+	// SetEncodedObject saves an object into the storage, the object should
+	// be created with the NewEncodedObject method, and it should fail if
+	// the type is not supported.
+	SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
+	// EncodedObject gets an object by hash with the given
+	// plumbing.ObjectType. Implementors should return
+	// (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with
+	// both the given hash and object type.
+	//
+	// Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
+	// TreeObject and AnyObject. If plumbing.AnyObject is given, the object must
+	// be looked up regardless of its type.
+	EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
+	// IterEncodedObjects returns a custom EncodedObjectIter over all the
+	// objects in the storage.
+	//
+	// Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject,
+	// TreeObject and AnyObject.
+	IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error)
+	// HasEncodedObject returns plumbing.ErrObjectNotFound if the object
+	// doesn't exist. If the object does exist, it returns nil.
+	HasEncodedObject(plumbing.Hash) error
+	// EncodedObjectSize returns the plaintext size of the encoded object.
+	EncodedObjectSize(plumbing.Hash) (int64, error)
+}
+
+// DeltaObjectStorer is an EncodedObjectStorer that can return delta
+// objects.
+type DeltaObjectStorer interface {
+	// DeltaObject is the same as EncodedObject but without resolving deltas.
+	// Deltas will be returned as plumbing.DeltaObject instances.
+	DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
+}
+
+// Transactioner is an optional interface for EncodedObjectStorer; it enables
+// transaction-based write and read operations in the storage.
+type Transactioner interface {
+	// Begin starts a transaction.
+	Begin() Transaction
+}
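+
+// For illustration, ErrStop lets a ForEach callback end an iteration early
+// without reporting an error; `s` is a placeholder EncodedObjectStorer:
+//
+//	iter, err := s.IterEncodedObjects(plumbing.CommitObject)
+//	if err != nil {
+//		return err
+//	}
+//	err = iter.ForEach(func(o plumbing.EncodedObject) error {
+//		if o.Size() > 1<<20 {
+//			return ErrStop // ForEach returns nil in this case
+//		}
+//		return nil
+//	})
+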
+// LooseObjectStorer is an optional interface for managing "loose"
+// objects, i.e. those not in packfiles.
+type LooseObjectStorer interface {
+	// ForEachObjectHash iterates over all the (loose) object hashes
+	// in the repository without necessarily having to read those objects.
+	// Objects only inside pack files may be omitted.
+	// If ErrStop is sent, the iteration is stopped and no error is returned.
+	ForEachObjectHash(func(plumbing.Hash) error) error
+	// LooseObjectTime looks up the (m)time associated with the
+	// loose object (that is not in a pack file). Some
+	// implementations (e.g. without loose objects)
+	// always return an error.
+	LooseObjectTime(plumbing.Hash) (time.Time, error)
+	// DeleteLooseObject deletes a loose object if it exists.
+	DeleteLooseObject(plumbing.Hash) error
+}
+
+// PackedObjectStorer is an optional interface for managing objects in
+// packfiles.
+type PackedObjectStorer interface {
+	// ObjectPacks returns hashes of object packs if the underlying
+	// implementation has pack files.
+	ObjectPacks() ([]plumbing.Hash, error)
+	// DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist.
+	// Deletion is only performed if the pack is older than the supplied time (or the time is zero).
+	DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
+}
+
+// PackfileWriter is an optional interface for EncodedObjectStorer; it enables
+// writing a packfile directly to the storage.
+type PackfileWriter interface {
+	// PackfileWriter returns a writer for writing a packfile to the storage.
+	//
+	// If the Storer does not implement PackfileWriter, the objects should be
+	// written using the SetEncodedObject method.
+	PackfileWriter() (io.WriteCloser, error)
+}
+
+// EncodedObjectIter is a generic closable interface for iterating over objects.
+type EncodedObjectIter interface {
+	Next() (plumbing.EncodedObject, error)
+	ForEach(func(plumbing.EncodedObject) error) error
+	Close()
+}
+
+// Transaction is an in-progress storage transaction. A transaction must end
+// with a call to Commit or Rollback.
+type Transaction interface {
+	SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error)
+	EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
+	Commit() error
+	Rollback() error
+}
+
+// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a
+// series of object hashes and yields their associated objects by retrieving
+// each one from object storage. The retrievals are lazy and only occur when the
+// iterator moves forward with a call to Next().
+//
+// The EncodedObjectLookupIter must be closed with a call to Close() when it is
+// no longer needed.
+type EncodedObjectLookupIter struct {
+	storage EncodedObjectStorer
+	series  []plumbing.Hash
+	t       plumbing.ObjectType
+	pos     int
+}
+
+// NewEncodedObjectLookupIter returns an object iterator given an object storage
+// and a slice of object hashes.
+func NewEncodedObjectLookupIter(
+	storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter {
+	return &EncodedObjectLookupIter{
+		storage: storage,
+		series:  series,
+		t:       t,
+	}
+}
+
+// Next returns the next object from the iterator. If the iterator has reached
+// the end it will return io.EOF as an error. If the object can't be found in
+// the object storage, it will return plumbing.ErrObjectNotFound as an error.
+// If the object is retrieved successfully error will be nil.
+func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) {
+	if iter.pos >= len(iter.series) {
+		return nil, io.EOF
+	}
+
+	hash := iter.series[iter.pos]
+	obj, err := iter.storage.EncodedObject(iter.t, hash)
+	if err == nil {
+		iter.pos++
+	}
+
+	return obj, err
+}
+
+// ForEach calls the cb function for each object contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped and no error is returned. The iterator is closed.
+func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error {
+	return ForEachIterator(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *EncodedObjectLookupIter) Close() {
+	iter.pos = len(iter.series)
+}
+
+// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a
+// series of objects stored in a slice and yields each one in turn when Next()
+// is called.
+//
+// The EncodedObjectSliceIter must be closed with a call to Close() when it is
+// no longer needed.
+type EncodedObjectSliceIter struct {
+	series []plumbing.EncodedObject
+}
+
+// NewEncodedObjectSliceIter returns an object iterator for the given slice of
+// objects.
+func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter {
+	return &EncodedObjectSliceIter{
+		series: series,
+	}
+}
+
+// Next returns the next object from the iterator. If the iterator has reached
+// the end it will return io.EOF as an error. If the object is retrieved
+// successfully error will be nil.
+func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) {
+	if len(iter.series) == 0 {
+		return nil, io.EOF
+	}
+
+	obj := iter.series[0]
+	iter.series = iter.series[1:]
+
+	return obj, nil
+}
+
+// ForEach calls the cb function for each object contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped and no error is returned. The iterator is closed.
+func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error {
+	return ForEachIterator(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *EncodedObjectSliceIter) Close() {
+	iter.series = []plumbing.EncodedObject{}
+}
+
+// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over
+// several EncodedObjectIters.
+//
+// The MultiEncodedObjectIter must be closed with a call to Close() when it is
+// no longer needed.
+type MultiEncodedObjectIter struct {
+	iters []EncodedObjectIter
+}
+
+// NewMultiEncodedObjectIter returns an object iterator for the given slice of
+// EncodedObjectIters.
+func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter {
+	return &MultiEncodedObjectIter{iters: iters}
+}
+
+// Next returns the next object from the iterator; when one iterator reaches
+// io.EOF, it is removed and the next one is used.
+func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) {
+	if len(iter.iters) == 0 {
+		return nil, io.EOF
+	}
+
+	obj, err := iter.iters[0].Next()
+	if err == io.EOF {
+		iter.iters[0].Close()
+		iter.iters = iter.iters[1:]
+		return iter.Next()
+	}
+
+	return obj, err
+}
+
+// ForEach calls the cb function for each object contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped and no error is returned. The iterator is closed.
+func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error {
+	return ForEachIterator(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *MultiEncodedObjectIter) Close() {
+	for _, i := range iter.iters {
+		i.Close()
+	}
+}
+
+type bareIterator interface {
+	Next() (plumbing.EncodedObject, error)
+	Close()
+}
+
+// ForEachIterator is a helper function to build iterators without the need to
+// rewrite the same ForEach function each time.
+func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error {
+	defer iter.Close()
+	for {
+		obj, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+
+		if err := cb(obj); err != nil {
+			if err == ErrStop {
+				return nil
+			}
+
+			return err
+		}
+	}
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go
new file mode 100644
index 0000000000..1d74ef3c6a
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go
@@ -0,0 +1,240 @@
+package storer
+
+import (
+	"errors"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+const MaxResolveRecursion = 1024
+
+// ErrMaxResolveRecursion is returned by ResolveReference when
+// MaxResolveRecursion is exceeded.
+var ErrMaxResolveRecursion = errors.New("max. recursion level reached")
+
+// ReferenceStorer is a generic storage of references.
+type ReferenceStorer interface {
+	SetReference(*plumbing.Reference) error
+	// CheckAndSetReference sets the reference `new`, but if `old` is
+	// not `nil`, it first checks that the current stored value for
+	// `old.Name()` matches the given reference value in `old`. If
+	// not, it returns an error and doesn't update `new`.
+	CheckAndSetReference(new, old *plumbing.Reference) error
+	Reference(plumbing.ReferenceName) (*plumbing.Reference, error)
+	IterReferences() (ReferenceIter, error)
+	RemoveReference(plumbing.ReferenceName) error
+	CountLooseRefs() (int, error)
+	PackRefs() error
+}
+
+// ReferenceIter is a generic closable interface for iterating over references.
+type ReferenceIter interface {
+	Next() (*plumbing.Reference, error)
+	ForEach(func(*plumbing.Reference) error) error
+	Close()
+}
+
+type referenceFilteredIter struct {
+	ff   func(r *plumbing.Reference) bool
+	iter ReferenceIter
+}
+
+// NewReferenceFilteredIter returns a reference iterator for the given
+// reference iterator. This iterator yields only the references for which the
+// provided function returns true.
+func NewReferenceFilteredIter(
+	ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter {
+	return &referenceFilteredIter{ff, iter}
+}
+
+// Next returns the next reference from the iterator. If the iterator has reached
+// the end it will return io.EOF as an error.
+func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) {
+	for {
+		r, err := iter.iter.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		if iter.ff(r) {
+			return r, nil
+		}
+
+		continue
+	}
+}
+
+// ForEach calls the cb function for each reference contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped and no error is returned. The iterator is closed.
+func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error {
+	defer iter.Close()
+	for {
+		r, err := iter.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		if err := cb(r); err != nil {
+			if err == ErrStop {
+				break
+			}
+
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Close releases any resources used by the iterator.
+func (iter *referenceFilteredIter) Close() {
+	iter.iter.Close()
+}
+
+// ReferenceSliceIter implements ReferenceIter. It iterates over a series of
+// references stored in a slice and yields each one in turn when Next() is
+// called.
+//
+// The ReferenceSliceIter must be closed with a call to Close() when it is no
+// longer needed.
+type ReferenceSliceIter struct {
+	series []*plumbing.Reference
+	pos    int
+}
+
+// NewReferenceSliceIter returns a reference iterator for the given slice of
+// references.
+func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter {
+	return &ReferenceSliceIter{
+		series: series,
+	}
+}
+
+// Next returns the next reference from the iterator. If the iterator has
+// reached the end it will return io.EOF as an error.
+func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) {
+	if iter.pos >= len(iter.series) {
+		return nil, io.EOF
+	}
+
+	obj := iter.series[iter.pos]
+	iter.pos++
+	return obj, nil
+}
+
+// ForEach calls the cb function for each reference contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped and no error is returned. The iterator is closed.
+func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
+	return forEachReferenceIter(iter, cb)
+}
+
+type bareReferenceIterator interface {
+	Next() (*plumbing.Reference, error)
+	Close()
+}
+
+func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
+	defer iter.Close()
+	for {
+		obj, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+
+		if err := cb(obj); err != nil {
+			if err == ErrStop {
+				return nil
+			}
+
+			return err
+		}
+	}
+}
+
+// Close releases any resources used by the iterator.
+func (iter *ReferenceSliceIter) Close() {
+	iter.pos = len(iter.series)
+}
+
+// MultiReferenceIter implements ReferenceIter. It iterates over several
+// ReferenceIters.
+//
+// The MultiReferenceIter must be closed with a call to Close() when it is no
+// longer needed.
+type MultiReferenceIter struct {
+	iters []ReferenceIter
+}
+
+// NewMultiReferenceIter returns a reference iterator for the given slice of
+// ReferenceIters.
+func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
+	return &MultiReferenceIter{iters: iters}
+}
+
+// Next returns the next reference from the iterator; when one iterator
+// reaches io.EOF, it is removed and the next one is used.
+func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) {
+	if len(iter.iters) == 0 {
+		return nil, io.EOF
+	}
+
+	obj, err := iter.iters[0].Next()
+	if err == io.EOF {
+		iter.iters[0].Close()
+		iter.iters = iter.iters[1:]
+		return iter.Next()
+	}
+
+	return obj, err
+}
+
+// ForEach calls the cb function for each reference contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped and no error is returned. The iterator is closed.
+func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
+	return forEachReferenceIter(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *MultiReferenceIter) Close() {
+	for _, i := range iter.iters {
+		i.Close()
+	}
+}
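+
+// For illustration, resolving HEAD with ResolveReference (defined below)
+// follows the chain of symbolic references down to a hash reference; `s` is
+// a placeholder ReferenceStorer:
+//
+//	ref, err := ResolveReference(s, plumbing.HEAD)
+//	if err == nil {
+//		// ref.Hash() is the hash HEAD ultimately points at
+//	}
+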
+// ResolveReference resolves a SymbolicReference to a HashReference.
+func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
+	r, err := s.Reference(n)
+	if err != nil || r == nil {
+		return r, err
+	}
+	return resolveReference(s, r, 0)
+}
+
+func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) {
+	if r.Type() != plumbing.SymbolicReference {
+		return r, nil
+	}
+
+	if recursion > MaxResolveRecursion {
+		return nil, ErrMaxResolveRecursion
+	}
+
+	t, err := s.Reference(r.Target())
+	if err != nil {
+		return nil, err
+	}
+
+	recursion++
+	return resolveReference(s, t, recursion)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go
new file mode 100644
index 0000000000..39ef5ea5c6
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go
@@ -0,0 +1,10 @@
+package storer
+
+import "github.com/go-git/go-git/v5/plumbing"
+
+// ShallowStorer is a storage of references to shallow commits by hash,
+// meaning that these commits have missing parents because of a shallow fetch.
+type ShallowStorer interface {
+	SetShallow([]plumbing.Hash) error
+	Shallow() ([]plumbing.Hash, error)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go b/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go
new file mode 100644
index 0000000000..c7bc65a0c4
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go
@@ -0,0 +1,15 @@
+package storer
+
+// Storer is a basic storer for encoded objects and references.
+type Storer interface {
+	EncodedObjectStorer
+	ReferenceStorer
+}
+
+// Initializer should be implemented by storers that need to perform
+// initialization when creating a new repository (i.e. git init).
+type Initializer interface {
+	// Init performs initialization of the storer and returns the error, if
+	// any.
+	Init() error
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go
new file mode 100644
index 0000000000..4f6d210e98
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go
@@ -0,0 +1,48 @@
+// Package client contains helper functions to deal with the different client
+// protocols.
+package client
+
+import (
+	"fmt"
+
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/plumbing/transport/file"
+	"github.com/go-git/go-git/v5/plumbing/transport/git"
+	"github.com/go-git/go-git/v5/plumbing/transport/http"
+	"github.com/go-git/go-git/v5/plumbing/transport/ssh"
+)
+
+// Protocols are the protocols supported by default.
+var Protocols = map[string]transport.Transport{
+	"http":  http.DefaultClient,
+	"https": http.DefaultClient,
+	"ssh":   ssh.DefaultClient,
+	"git":   git.DefaultClient,
+	"file":  file.DefaultClient,
+}
+
+// InstallProtocol adds or modifies an existing protocol.
+func InstallProtocol(scheme string, c transport.Transport) {
+	if c == nil {
+		delete(Protocols, scheme)
+		return
+	}
+
+	Protocols[scheme] = c
+}
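+
+// For illustration, a transport is normally obtained from an Endpoint built
+// out of a repository URL via NewClient (defined below); the URL here is
+// only an example:
+//
+//	ep, err := transport.NewEndpoint("https://github.com/go-git/go-git.git")
+//	if err != nil {
+//		return err
+//	}
+//	tr, err := NewClient(ep) // the https entry of the Protocols map
+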
+// NewClient returns the appropriate client among the set of known protocols:
+// http://, https://, ssh://, git:// and file://.
+// See `InstallProtocol` to add or modify protocols.
+func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) {
+	f, ok := Protocols[endpoint.Protocol]
+	if !ok {
+		return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol)
+	}
+
+	if f == nil {
+		return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol)
+	}
+
+	return f, nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go
new file mode 100644
index 0000000000..ead215557d
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go
@@ -0,0 +1,274 @@
+// Package transport includes the implementation for different transport
+// protocols.
+//
+// `Client` can be used to fetch and send packfiles to a git server.
+// The `client` package provides higher level functions to instantiate the
+// appropriate `Client` based on the repository URL.
+//
+// go-git supports HTTP and SSH (see `Protocols`), but you can also install
+// your own protocols (see the `client` package).
+//
+// Each protocol has its own implementation of `Client`, but you should
+// generally not use them directly; use `client.NewClient` instead.
+package transport
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"strconv"
+	"strings"
+
+	giturl "github.com/go-git/go-git/v5/internal/url"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+)
+
+var (
+	ErrRepositoryNotFound     = errors.New("repository not found")
+	ErrEmptyRemoteRepository  = errors.New("remote repository is empty")
+	ErrAuthenticationRequired = errors.New("authentication required")
+	ErrAuthorizationFailed    = errors.New("authorization failed")
+	ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given")
+	ErrInvalidAuthMethod      = errors.New("invalid auth method")
+	ErrAlreadyConnected       = errors.New("session already established")
+)
+
+const (
+	UploadPackServiceName  = "git-upload-pack"
+	ReceivePackServiceName = "git-receive-pack"
+)
+
+// Transport can initiate git-upload-pack and git-receive-pack processes.
+// It is implemented both by the client and the server, making this an RPC.
+type Transport interface {
+	// NewUploadPackSession starts a git-upload-pack session for an endpoint.
+	NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error)
+	// NewReceivePackSession starts a git-receive-pack session for an endpoint.
+	NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error)
+}
+
+type Session interface {
+	// AdvertisedReferences retrieves the advertised references for a
+	// repository.
+	// If the repository does not exist, returns ErrRepositoryNotFound.
+	// If the repository exists, but is empty, returns ErrEmptyRemoteRepository.
+	AdvertisedReferences() (*packp.AdvRefs, error)
+	io.Closer
+}
+
+type AuthMethod interface {
+	fmt.Stringer
+	Name() string
+}
+
+// UploadPackSession represents a git-upload-pack session.
+// A git-upload-pack session has two steps: reference discovery
+// (AdvertisedReferences) and uploading pack (UploadPack).
+type UploadPackSession interface {
+	Session
+	// UploadPack takes a git-upload-pack request and returns a response,
+	// including a packfile. Don't be confused by the terminology: the client
+	// side of a git-upload-pack is called git-fetch-pack, although here
+	// the same interface is used to make it RPC-like.
+	UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error)
+}
+
+// ReceivePackSession represents a git-receive-pack session.
+// A git-receive-pack session has two steps: reference discovery
+// (AdvertisedReferences) and receiving pack (ReceivePack).
+// In that order.
+type ReceivePackSession interface {
+	Session
+	// ReceivePack sends an update references request and a packfile
+	// reader and returns a ReportStatus and error. Don't be confused by
+	// the terminology: the client side of a git-receive-pack is called
+	// git-send-pack, although here the same interface is used to make it
+	// RPC-like.
+	ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error)
+}
+
+// Endpoint represents a Git URL in any supported protocol.
+type Endpoint struct {
+	// Protocol is the protocol of the endpoint (e.g. git, https, file).
+	Protocol string
+	// User is the user.
+	User string
+	// Password is the password.
+	Password string
+	// Host is the host.
+	Host string
+	// Port is the port to connect to; if 0, the default port for the given
+	// protocol will be used.
+	Port int
+	// Path is the repository path.
+	Path string
+}
+
+var defaultPorts = map[string]int{
+	"http":  80,
+	"https": 443,
+	"git":   9418,
+	"ssh":   22,
+}
+
+// String returns a string representation of the Git URL.
+func (u *Endpoint) String() string {
+	var buf bytes.Buffer
+	if u.Protocol != "" {
+		buf.WriteString(u.Protocol)
+		buf.WriteByte(':')
+	}
+
+	if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" {
+		buf.WriteString("//")
+
+		if u.User != "" || u.Password != "" {
+			buf.WriteString(url.PathEscape(u.User))
+			if u.Password != "" {
+				buf.WriteByte(':')
+				buf.WriteString(url.PathEscape(u.Password))
+			}
+
+			buf.WriteByte('@')
+		}
+
+		if u.Host != "" {
+			buf.WriteString(u.Host)
+
+			if u.Port != 0 {
+				port, ok := defaultPorts[strings.ToLower(u.Protocol)]
+				if !ok || ok && port != u.Port {
+					fmt.Fprintf(&buf, ":%d", u.Port)
+				}
+			}
+		}
+	}
+
+	if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
+		buf.WriteByte('/')
+	}
+
+	buf.WriteString(u.Path)
+	return buf.String()
+}
+
+func NewEndpoint(endpoint string) (*Endpoint, error) {
+	if e, ok := parseSCPLike(endpoint); ok {
+		return e, nil
+	}
+
+	if e, ok := parseFile(endpoint); ok {
+		return e, nil
+	}
+
+	return parseURL(endpoint)
+}
+
+func parseURL(endpoint string) (*Endpoint, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	if !u.IsAbs() {
+		return nil, plumbing.NewPermanentError(fmt.Errorf(
+			"invalid endpoint: %s", endpoint,
+		))
+	}
+
+	var user, pass string
+	if u.User != nil {
+		user = u.User.Username()
+		pass, _ = u.User.Password()
+	}
+
+	return &Endpoint{
+		Protocol: u.Scheme,
+		User:     user,
+		Password: pass,
+		Host:     u.Hostname(),
+		Port:     getPort(u),
+		Path:     getPath(u),
+	}, nil
+}
+
+func getPort(u *url.URL) int {
+	p := u.Port()
+	if p == "" {
+		return 0
+	}
+
+	i, err := strconv.Atoi(p)
+	if err != nil {
+		return 0
+	}
+
+	return i
+}
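+
+// For illustration, NewEndpoint (defined above) accepts full URLs, SCP-like
+// strings and bare paths; all the values below are examples:
+//
+//	NewEndpoint("ssh://git@github.com/user/repo.git") // URL form
+//	NewEndpoint("git@github.com:user/repo.git")       // SCP-like form
+//	NewEndpoint("/srv/git/repo.git")                  // file form
+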
+func getPath(u *url.URL) string {
+	var res string = u.Path
+	if u.RawQuery != "" {
+		res += "?" + u.RawQuery
+	}
+
+	if u.Fragment != "" {
+		res += "#" + u.Fragment
+	}
+
+	return res
+}
+
+func parseSCPLike(endpoint string) (*Endpoint, bool) {
+	if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) {
+		return nil, false
+	}
+
+	user, host, portStr, path := giturl.FindScpLikeComponents(endpoint)
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		port = 22
+	}
+
+	return &Endpoint{
+		Protocol: "ssh",
+		User:     user,
+		Host:     host,
+		Port:     port,
+		Path:     path,
+	}, true
+}
+
+func parseFile(endpoint string) (*Endpoint, bool) {
+	if giturl.MatchesScheme(endpoint) {
+		return nil, false
+	}
+
+	path := endpoint
+	return &Endpoint{
+		Protocol: "file",
+		Path:     path,
+	}, true
+}
+
+// UnsupportedCapabilities are the capabilities not supported by any client
+// implementation.
+var UnsupportedCapabilities = []capability.Capability{
+	capability.MultiACK,
+	capability.MultiACKDetailed,
+	capability.ThinPack,
+}
+
+// FilterUnsupportedCapabilities filters out all the UnsupportedCapabilities
+// from a capability.List; the intended usage is in client implementations,
+// to filter the capabilities of an AdvRefs message.
+func FilterUnsupportedCapabilities(list *capability.List) {
+	for _, c := range UnsupportedCapabilities {
+		list.Delete(c)
+	}
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go
new file mode 100644
index 0000000000..f6e23652a7
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go
@@ -0,0 +1,156 @@
+// Package file implements the file transport protocol.
+package file
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
+)
+
+// DefaultClient is the default local client.
+var DefaultClient = NewClient(
+	transport.UploadPackServiceName,
+	transport.ReceivePackServiceName,
+)
+
+type runner struct {
+	UploadPackBin  string
+	ReceivePackBin string
+}
+
+// NewClient returns a new local client using the given git-upload-pack and
+// git-receive-pack binaries.
+func NewClient(uploadPackBin, receivePackBin string) transport.Transport {
+	return common.NewClient(&runner{
+		UploadPackBin:  uploadPackBin,
+		ReceivePackBin: receivePackBin,
+	})
+}
+
+func prefixExecPath(cmd string) (string, error) {
+	// Use `git --exec-path` to find the exec path.
+	execCmd := exec.Command("git", "--exec-path")
+
+	stdout, err := execCmd.StdoutPipe()
+	if err != nil {
+		return "", err
+	}
+	stdoutBuf := bufio.NewReader(stdout)
+
+	err = execCmd.Start()
+	if err != nil {
+		return "", err
+	}
+
+	execPathBytes, isPrefix, err := stdoutBuf.ReadLine()
+	if err != nil {
+		return "", err
+	}
+	if isPrefix {
+		return "", errors.New("Couldn't read exec-path line all at once")
+	}
+
+	err = execCmd.Wait()
+	if err != nil {
+		return "", err
+	}
+	execPath := string(execPathBytes)
+	execPath = strings.TrimSpace(execPath)
+	cmd = filepath.Join(execPath, cmd)
+
+	// Make sure it actually exists.
+	_, err = exec.LookPath(cmd)
+	if err != nil {
+		return "", err
+	}
+	return cmd, nil
+}
+
+func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod,
+) (common.Command, error) {
+
+	switch cmd {
+	case transport.UploadPackServiceName:
+		cmd = r.UploadPackBin
+	case transport.ReceivePackServiceName:
+		cmd = r.ReceivePackBin
+	}
+
+	_, err := exec.LookPath(cmd)
+	if err != nil {
+		if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
+			cmd, err = prefixExecPath(cmd)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			return nil, err
+		}
+	}
+
+	return &command{cmd: exec.Command(cmd, ep.Path)}, nil
+}
+
+type command struct {
+	cmd          *exec.Cmd
+	stderrCloser io.Closer
+	closed       bool
+}
+
+func (c *command) Start() error {
+	return c.cmd.Start()
+}
+
+func (c *command) StderrPipe() (io.Reader, error) {
+	// Pipe returned by Command.StderrPipe has a race with Read + Command.Wait.
+	// We use an io.Pipe and close it after the command finishes.
+	r, w := io.Pipe()
+	c.cmd.Stderr = w
+	c.stderrCloser = r
+	return r, nil
+}
+
+func (c *command) StdinPipe() (io.WriteCloser, error) {
+	return c.cmd.StdinPipe()
+}
+
+func (c *command) StdoutPipe() (io.Reader, error) {
+	return c.cmd.StdoutPipe()
+}
+
+func (c *command) Kill() error {
+	c.cmd.Process.Kill()
+	return c.Close()
+}
+
+// Close waits for the command to exit.
+func (c *command) Close() error {
+	if c.closed {
+		return nil
+	}
+
+	defer func() {
+		c.closed = true
+		_ = c.stderrCloser.Close()
+
+	}()
+
+	err := c.cmd.Wait()
+	if _, ok := err.(*os.PathError); ok {
+		return nil
+	}
+
+	// When a repository does not exist, the command exits with code 128.
+	if _, ok := err.(*exec.ExitError); ok {
+		return nil
+	}
+
+	return err
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go
new file mode 100644
index 0000000000..b45d7a71c2
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go
@@ -0,0 +1,53 @@
+package file
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
+	"github.com/go-git/go-git/v5/plumbing/transport/server"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// ServeUploadPack serves a git-upload-pack request using standard output, input
+// and error. This is meant to be used when implementing a git-upload-pack
+// command.
+func ServeUploadPack(path string) error {
+	ep, err := transport.NewEndpoint(path)
+	if err != nil {
+		return err
+	}
+
+	// TODO: define and implement a server-side AuthMethod
+	s, err := server.DefaultServer.NewUploadPackSession(ep, nil)
+	if err != nil {
+		return fmt.Errorf("error creating session: %s", err)
+	}
+
+	return common.ServeUploadPack(srvCmd, s)
+}
+
+// ServeReceivePack serves a git-receive-pack request using standard output,
+// input and error. This is meant to be used when implementing a
+// git-receive-pack command.
+func ServeReceivePack(path string) error {
+	ep, err := transport.NewEndpoint(path)
+	if err != nil {
+		return err
+	}
+
+	// TODO: define and implement a server-side AuthMethod
+	s, err := server.DefaultServer.NewReceivePackSession(ep, nil)
+	if err != nil {
+		return fmt.Errorf("error creating session: %s", err)
+	}
+
+	return common.ServeReceivePack(srvCmd, s)
+}
+
+var srvCmd = common.ServerCommand{
+	Stdin:  os.Stdin,
+	Stdout: ioutil.WriteNopCloser(os.Stdout),
+	Stderr: os.Stderr,
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go
new file mode 100644
index 0000000000..306aae261f
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go
@@ -0,0 +1,109 @@
+// Package git implements the git transport protocol.
+package git
+
+import (
+	"fmt"
+	"io"
+	"net"
+
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/plumbing/transport/internal/common"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// DefaultClient is the default git client.
+var DefaultClient = common.NewClient(&runner{})
+
+const DefaultPort = 9418
+
+type runner struct{}
+
+// Command returns a new Command for the given cmd and Endpoint.
+func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) {
+	// auth is not allowed since the git protocol doesn't support authentication
+	if auth != nil {
+		return nil, transport.ErrInvalidAuthMethod
+	}
+	c := &command{command: cmd, endpoint: ep}
+	if err := c.connect(); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+type command struct {
+	conn      net.Conn
+	connected bool
+	command   string
+	endpoint  *transport.Endpoint
+}
+
+// Start executes the command, sending the required message to the TCP
+// connection.
+func (c *command) Start() error {
+	cmd := endpointToCommand(c.command, c.endpoint)
+
+	e := pktline.NewEncoder(c.conn)
+	return e.Encode([]byte(cmd))
+}
+
+func (c *command) connect() error {
+	if c.connected {
+		return transport.ErrAlreadyConnected
+	}
+
+	var err error
+	c.conn, err = net.Dial("tcp", c.getHostWithPort())
+	if err != nil {
+		return err
+	}
+
+	c.connected = true
+	return nil
+}
+
+func (c *command) getHostWithPort() string {
+	host := c.endpoint.Host
+	port := c.endpoint.Port
+	if port <= 0 {
+		port = DefaultPort
+	}
+
+	return fmt.Sprintf("%s:%d", host, port)
+}
+
+// StderrPipe returns nil; the git protocol doesn't have any dedicated error
+// channel.
+func (c *command) StderrPipe() (io.Reader, error) {
+	return nil, nil
+}
+
+// StdinPipe returns the underlying connection as a WriteCloser, wrapped to
+// prevent calls to Close on the connection; a command execution in the git
+// protocol can't be closed or killed.
+func (c *command) StdinPipe() (io.WriteCloser, error) {
+	return ioutil.WriteNopCloser(c.conn), nil
+}
+
+// StdoutPipe returns the underlying connection as a Reader.
+func (c *command) StdoutPipe() (io.Reader, error) {
+	return c.conn, nil
+}
+
+func endpointToCommand(cmd string, ep *transport.Endpoint) string {
+	host := ep.Host
+	if ep.Port != DefaultPort {
+		host = fmt.Sprintf("%s:%d", ep.Host, ep.Port)
+	}
+
+	return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0)
+}
+
+// Close closes the TCP connection.
+func (c *command) Close() error {
+	if !c.connected {
+		return nil
+	}
+
+	c.connected = false
+	return c.conn.Close()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go
new file mode 100644
index 0000000000..aeedc5bb55
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go
@@ -0,0 +1,281 @@
+// Package http implements the HTTP transport protocol.
+package http
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// it requires a bytes.Buffer, because we need to know the length
+func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) {
+	req.Header.Add("User-Agent", "git/1.0")
+	req.Header.Add("Host", host) // host:port
+
+	if content == nil {
+		req.Header.Add("Accept", "*/*")
+		return
+	}
+
+	req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType))
+	req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType))
+	req.Header.Add("Content-Length", strconv.Itoa(content.Len()))
+}
+
+const infoRefsPath = "/info/refs"
+
+func advertisedReferences(s *session, serviceName string) (ref *packp.AdvRefs, err error) {
+	url := fmt.Sprintf(
+		"%s%s?service=%s",
+		s.endpoint.String(), infoRefsPath, serviceName,
+	)
+
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	s.ApplyAuthToRequest(req)
+	applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName)
+	res, err := s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	s.ModifyEndpointIfRedirect(res)
+	defer ioutil.CheckClose(res.Body, &err)
+
+	if err = NewErr(res); err != nil {
+		return nil, err
+	}
+
+	ar := packp.NewAdvRefs()
+	if err = ar.Decode(res.Body); err != nil {
+		if err == packp.ErrEmptyAdvRefs {
+			err = transport.ErrEmptyRemoteRepository
+		}
+
+		return nil, err
+	}
+
+	transport.FilterUnsupportedCapabilities(ar.Capabilities)
+	s.advRefs = ar
+
+	return ar, nil
+}
+
+type client struct {
+	c *http.Client
+}
+
+// DefaultClient is the default HTTP client, which uses `http.DefaultClient`.
+var DefaultClient = NewClient(nil)
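+
+// For illustration, a custom net/http client (for example one with a
+// timeout) can be wired in via NewClient below together with go-git's
+// plumbing/transport/client package; the aliases and the timeout value are
+// only examples:
+//
+//	customClient := &http.Client{Timeout: 30 * time.Second}
+//	client.InstallProtocol("https", githttp.NewClient(customClient))
+//
+// where githttp refers to this package and client to
+// go-git/v5/plumbing/transport/client.
+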
+// NewClient creates a new client with a custom net/http client.
+// See `InstallProtocol` to install and override the default http client.
+// Unless a properly initialized client is given, it will fall back to
+// `http.DefaultClient`.
+//
+// Note that the HTTP client cannot distinguish between private and
+// nonexistent repositories on GitHub, so it returns
+// `ErrAuthenticationRequired` for both.
+func NewClient(c *http.Client) transport.Transport {
+	if c == nil {
+		return &client{http.DefaultClient}
+	}
+
+	return &client{
+		c: c,
+	}
+}
+
+func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
+	transport.UploadPackSession, error) {
+
+	return newUploadPackSession(c.c, ep, auth)
+}
+
+func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (
+	transport.ReceivePackSession, error) {
+
+	return newReceivePackSession(c.c, ep, auth)
+}
+
+type session struct {
+	auth     AuthMethod
+	client   *http.Client
+	endpoint *transport.Endpoint
+	advRefs  *packp.AdvRefs
+}
+
+func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) {
+	s := &session{
+		auth:     basicAuthFromEndpoint(ep),
+		client:   c,
+		endpoint: ep,
+	}
+	if auth != nil {
+		a, ok := auth.(AuthMethod)
+		if !ok {
+			return nil, transport.ErrInvalidAuthMethod
+		}
+
+		s.auth = a
+	}
+
+	return s, nil
+}
+
+func (s *session) ApplyAuthToRequest(req *http.Request) {
+	if s.auth == nil {
+		return
+	}
+
+	s.auth.SetAuth(req)
+}
+
+func (s *session) ModifyEndpointIfRedirect(res *http.Response) {
+	if res.Request == nil {
+		return
+	}
+
+	r := res.Request
+	if !strings.HasSuffix(r.URL.Path, infoRefsPath) {
+		return
+	}
+
+	h, p, err := net.SplitHostPort(r.URL.Host)
+	if err != nil {
+		h = r.URL.Host
+	}
+	if p != "" {
+		port, err := strconv.Atoi(p)
+		if err == nil {
+			s.endpoint.Port = port
+		}
+	}
+	s.endpoint.Host = h
+
+	s.endpoint.Protocol = r.URL.Scheme
+	s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
+}
+
+func (*session) Close() error {
+	return nil
+}
+
+// AuthMethod is a concrete implementation of transport.AuthMethod for HTTP
+// services.
+type AuthMethod interface {
+	transport.AuthMethod
+	SetAuth(r *http.Request)
+}
+
+func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth {
+	u := ep.User
+	if u == "" {
+		return nil
+	}
+
+	return &BasicAuth{u, ep.Password}
+}
+
+// BasicAuth represents HTTP basic authentication.
+type BasicAuth struct {
+	Username, Password string
+}
+
+func (a *BasicAuth) SetAuth(r *http.Request) {
+	if a == nil {
+		return
+	}
+
+	r.SetBasicAuth(a.Username, a.Password)
+}
+
+// Name is the name of the auth method.
+func (a *BasicAuth) Name() string {
+	return "http-basic-auth"
+}
+
+func (a *BasicAuth) String() string {
+	masked := "*******"
+	if a.Password == "" {
+		masked = ""
+	}
+
+	return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
+}
+
+// TokenAuth implements an http.AuthMethod that can be used with http transport
+// to authenticate with HTTP token authentication (also known as bearer
+// authentication).
+//
+// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
+// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
+// use basic HTTP authentication, with the OAuth token as user or password.
+// Check the documentation of your git server for details.
+type TokenAuth struct {
+	Token string
+}
+
+func (a *TokenAuth) SetAuth(r *http.Request) {
+	if a == nil {
+		return
+	}
+	r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token))
+}
+
+// Name is the name of the auth method.
+func (a *TokenAuth) Name() string {
+	return "http-token-auth"
+}
+
+func (a *TokenAuth) String() string {
+	masked := "*******"
+	if a.Token == "" {
+		masked = ""
+	}
+	return fmt.Sprintf("%s - %s", a.Name(), masked)
+}
+
+// Err is a dedicated error type to return errors based on the status code.
+type Err struct {
+	Response *http.Response
+}
+
+// NewErr returns a new Err based on an HTTP response.
+func NewErr(r *http.Response) error {
+	if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices {
+		return nil
+	}
+
+	switch r.StatusCode {
+	case http.StatusUnauthorized:
+		return transport.ErrAuthenticationRequired
+	case http.StatusForbidden:
+		return transport.ErrAuthorizationFailed
+	case http.StatusNotFound:
+		return transport.ErrRepositoryNotFound
+	}
+
+	return plumbing.NewUnexpectedError(&Err{r})
+}
+
+// StatusCode returns the status code of the response.
+func (e *Err) StatusCode() int {
+	return e.Response.StatusCode
+}
+
+func (e *Err) Error() string {
+	return fmt.Sprintf("unexpected requesting %q status code: %d",
+		e.Response.Request.URL, e.Response.StatusCode,
+	)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go
new file mode 100644
index 0000000000..433dfcfda1
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go
@@ -0,0 +1,106 @@
+package http
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
+	"github.com/go-git/go-git/v5/plumbing/transport"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+type rpSession struct {
+	*session
+}
+
+func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) {
+	s, err := newSession(c, ep, auth)
+	return &rpSession{s}, err
+}
+
+func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) {
+	return advertisedReferences(s.session, transport.ReceivePackServiceName)
+}
+
+func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (
+	*packp.ReportStatus, error) {
+	url := fmt.Sprintf(
+		"%s/%s",
+		s.endpoint.String(), transport.ReceivePackServiceName,
+	)
+
+	buf := bytes.NewBuffer(nil)
+	if err := req.Encode(buf); err != nil {
+		return nil, err
+	}
+
+	res, err := s.doRequest(ctx, http.MethodPost, url, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := ioutil.NonEmptyReader(res.Body)
+	if err == ioutil.ErrEmptyReader {
+		return nil, nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	var d *sideband.Demuxer
+	if req.Capabilities.Supports(capability.Sideband64k) {
+		d = sideband.NewDemuxer(sideband.Sideband64k, r)
+	} else if req.Capabilities.Supports(capability.Sideband) {
+		d = sideband.NewDemuxer(sideband.Sideband, r)
+	}
+	if d != nil {
+		d.Progress = req.Progress
+		r = d
+	}
+
+	rc := ioutil.NewReadCloser(r, res.Body)
+
+	report := packp.NewReportStatus()
+	if err := report.Decode(rc); err != nil {
+		return nil, err
+	}
+
+	return report, report.Error()
+}
+
+func (s *rpSession) doRequest(
+	ctx
context.Context, method, url string, content *bytes.Buffer, +) (*http.Response, error) { + + var body io.Reader + if content != nil { + body = content + } + + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, plumbing.NewPermanentError(err) + } + + applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName) + s.ApplyAuthToRequest(req) + + res, err := s.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, plumbing.NewUnexpectedError(err) + } + + if err := NewErr(res); err != nil { + _ = res.Body.Close() + return nil, err + } + + return res, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go new file mode 100644 index 0000000000..db37089402 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go @@ -0,0 +1,123 @@ +package http + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/plumbing/transport/internal/common" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +type upSession struct { + *session +} + +func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { + s, err := newSession(c, ep, auth) + return &upSession{s}, err +} + +func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { + return advertisedReferences(s.session, transport.UploadPackServiceName) +} + +func (s *upSession) UploadPack( + ctx context.Context, req *packp.UploadPackRequest, +) (*packp.UploadPackResponse, error) { + + if req.IsEmpty() { + return nil, transport.ErrEmptyUploadPackRequest + } + + if err := req.Validate(); err != nil { + return nil, err + } + + url := fmt.Sprintf( + "%s/%s", + s.endpoint.String(), transport.UploadPackServiceName, + ) + + content, err := uploadPackRequestToReader(req) + if err != nil { + return nil, err + } + + res, err := s.doRequest(ctx, http.MethodPost, url, content) + if err != nil { + return nil, err + } + + r, err := ioutil.NonEmptyReader(res.Body) + if err != nil { + if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF { + return nil, transport.ErrEmptyUploadPackRequest + } + + return nil, err + } + + rc := ioutil.NewReadCloser(r, res.Body) + return common.DecodeUploadPackResponse(rc, req) +} + +// Close does nothing. 
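+// Each call on this transport is a single, self-contained HTTP request, so +// there is no persistent connection state for Close to release.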
+func (s *upSession) Close() error { + return nil +} + +func (s *upSession) doRequest( + ctx context.Context, method, url string, content *bytes.Buffer, +) (*http.Response, error) { + + var body io.Reader + if content != nil { + body = content + } + + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, plumbing.NewPermanentError(err) + } + + applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName) + s.ApplyAuthToRequest(req) + + res, err := s.client.Do(req.WithContext(ctx)) + if err != nil { + return nil, plumbing.NewUnexpectedError(err) + } + + if err := NewErr(res); err != nil { + _ = res.Body.Close() + return nil, err + } + + return res, nil +} + +func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + e := pktline.NewEncoder(buf) + + if err := req.UploadRequest.Encode(buf); err != nil { + return nil, fmt.Errorf("sending upload-req message: %s", err) + } + + if err := req.UploadHaves.Encode(buf, false); err != nil { + return nil, fmt.Errorf("sending haves message: %s", err) + } + + if err := e.EncodeString("done\n"); err != nil { + return nil, err + } + + return buf, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go new file mode 100644 index 0000000000..89432e34c1 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go @@ -0,0 +1,474 @@ +// Package common implements the git pack protocol with a pluggable transport. +// This is a low-level package to implement new transports. Use a concrete +// implementation instead (e.g. http, file, ssh). +// +// A simple example of usage can be found in the file package. +package common + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + stdioutil "io/ioutil" + "strings" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/pktline" + "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +const ( + readErrorSecondsTimeout = 10 +) + +var ( + ErrTimeoutExceeded = errors.New("timeout exceeded") +) + +// Commander creates Command instances. This is the main entry point for +// transport implementations. +type Commander interface { + // Command creates a new Command for the given git command and + // endpoint. cmd can be git-upload-pack or git-receive-pack. An + // error should be returned if the endpoint is not supported or the + // command cannot be created (e.g. binary does not exist, connection + // cannot be established). + Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) +} + +// Command is used for a single command execution. +// This interface is modeled after exec.Cmd and ssh.Session in the standard +// library. +type Command interface { + // StderrPipe returns a pipe that will be connected to the command's + // standard error when the command starts. It should not be called after + // Start. + StderrPipe() (io.Reader, error) + // StdinPipe returns a pipe that will be connected to the command's + // standard input when the command starts. It should not be called after + // Start. The pipe should be closed when no more input is expected. 
+ StdinPipe() (io.WriteCloser, error) + // StdoutPipe returns a pipe that will be connected to the command's + // standard output when the command starts. It should not be called after + // Start. + StdoutPipe() (io.Reader, error) + // Start starts the specified command. It does not wait for it to + // complete. + Start() error + // Close closes the command and releases any resources used by it. It + // will block until the command exits. + Close() error +} + +// CommandKiller expands the Command interface, enabling it to be killed. +type CommandKiller interface { + // Kill and close the session regardless of its state. It will block until + // the command is terminated. + Kill() error +} + +type client struct { + cmdr Commander +} + +// NewClient creates a new client using the given Commander. +func NewClient(runner Commander) transport.Transport { + return &client{runner} +} + +// NewUploadPackSession creates a new UploadPackSession. +func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( + transport.UploadPackSession, error) { + + return c.newSession(transport.UploadPackServiceName, ep, auth) +} + +// NewReceivePackSession creates a new ReceivePackSession. +func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( + transport.ReceivePackSession, error) { + + return c.newSession(transport.ReceivePackServiceName, ep, auth) +} + +type session struct { + Stdin io.WriteCloser + Stdout io.Reader + Command Command + + isReceivePack bool + advRefs *packp.AdvRefs + packRun bool + finished bool + firstErrLine chan string +} + +func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { + cmd, err := c.cmdr.Command(s, ep, auth) + if err != nil { + return nil, err + } + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if err := cmd.Start(); err != nil { + return nil, err + } + + return &session{ + Stdin: stdin, + Stdout: stdout, + Command: cmd, + firstErrLine: c.listenFirstError(stderr), + isReceivePack: s == transport.ReceivePackServiceName, + }, nil +} + +func (c *client) listenFirstError(r io.Reader) chan string { + if r == nil { + return nil + } + + errLine := make(chan string, 1) + go func() { + s := bufio.NewScanner(r) + if s.Scan() { + errLine <- s.Text() + } else { + close(errLine) + } + + _, _ = io.Copy(stdioutil.Discard, r) + }() + + return errLine +} + +// AdvertisedReferences retrieves the advertised references from the server. +func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { + if s.advRefs != nil { + return s.advRefs, nil + } + + ar := packp.NewAdvRefs() + if err := ar.Decode(s.Stdout); err != nil { + if err := s.handleAdvRefDecodeError(err); err != nil { + return nil, err + } + } + + // Some servers, like jGit, announce capabilities instead of returning an + // empty packp message with a flush. This verifies that we received an + // empty adv-refs message, even if it contains capabilities. + if !s.isReceivePack && ar.IsEmpty() { + return nil, transport.ErrEmptyRemoteRepository + } + + transport.FilterUnsupportedCapabilities(ar.Capabilities) + s.advRefs = ar + return ar, nil +} + +func (s *session) handleAdvRefDecodeError(err error) error { + // If the repository is not found, we get empty stdout and the server + // writes an error to stderr.
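+ // checkNotFoundError below waits, bounded by readErrorSecondsTimeout, for + // that first stderr line so the failure can be classified.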
+ if err == packp.ErrEmptyInput { + s.finished = true + if err := s.checkNotFoundError(); err != nil { + return err + } + + return io.ErrUnexpectedEOF + } + + // For empty (but existing) repositories, we get an empty + // advertised-references message that is still valid; it includes at + // least a flush. + if err == packp.ErrEmptyAdvRefs { + // Empty repositories are valid for git-receive-pack. + if s.isReceivePack { + return nil + } + + if err := s.finish(); err != nil { + return err + } + + return transport.ErrEmptyRemoteRepository + } + + // Some servers send errors as normal content (git protocol), so decoding + // fails; we need to check the content to detect not-found errors. + if uerr, ok := err.(*packp.ErrUnexpectedData); ok { + if isRepoNotFoundError(string(uerr.Data)) { + return transport.ErrRepositoryNotFound + } + } + + return err +} + +// UploadPack performs a request to the server to fetch a packfile. A reader is +// returned with the packfile content. The reader must be closed after reading. +func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { + if req.IsEmpty() { + return nil, transport.ErrEmptyUploadPackRequest + } + + if err := req.Validate(); err != nil { + return nil, err + } + + if _, err := s.AdvertisedReferences(); err != nil { + return nil, err + } + + s.packRun = true + + in := s.StdinContext(ctx) + out := s.StdoutContext(ctx) + + if err := uploadPack(in, out, req); err != nil { + return nil, err + } + + r, err := ioutil.NonEmptyReader(out) + if err == ioutil.ErrEmptyReader { + if c, ok := s.Stdout.(io.Closer); ok { + _ = c.Close() + } + + return nil, transport.ErrEmptyUploadPackRequest + } + + if err != nil { + return nil, err + } + + rc := ioutil.NewReadCloser(r, s) + return DecodeUploadPackResponse(rc, req) +} + +func (s *session) StdinContext(ctx context.Context) io.WriteCloser { + return ioutil.NewWriteCloserOnError( + ioutil.NewContextWriteCloser(ctx, s.Stdin), + s.onError, + ) +} + +func (s *session) StdoutContext(ctx context.Context) io.Reader { + return ioutil.NewReaderOnError( + ioutil.NewContextReader(ctx, s.Stdout), + s.onError, + ) +} + +func (s *session) onError(err error) { + if k, ok := s.Command.(CommandKiller); ok { + _ = k.Kill() + } + + _ = s.Close() +} + +func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { + if _, err := s.AdvertisedReferences(); err != nil { + return nil, err + } + + s.packRun = true + + w := s.StdinContext(ctx) + if err := req.Encode(w); err != nil { + return nil, err + } + + if err := w.Close(); err != nil { + return nil, err + } + + if !req.Capabilities.Supports(capability.ReportStatus) { + // If we don't have report-status, we can only + // check the error returned when the command closes.
+ return nil, s.Command.Close() + } + + r := s.StdoutContext(ctx) + + var d *sideband.Demuxer + if req.Capabilities.Supports(capability.Sideband64k) { + d = sideband.NewDemuxer(sideband.Sideband64k, r) + } else if req.Capabilities.Supports(capability.Sideband) { + d = sideband.NewDemuxer(sideband.Sideband, r) + } + if d != nil { + d.Progress = req.Progress + r = d + } + + report := packp.NewReportStatus() + if err := report.Decode(r); err != nil { + return nil, err + } + + if err := report.Error(); err != nil { + defer s.Close() + return report, err + } + + return report, s.Command.Close() +} + +func (s *session) finish() error { + if s.finished { + return nil + } + + s.finished = true + + // If we did not run a upload/receive-pack, we close the connection + // gracefully by sending a flush packet to the server. If the server + // operates correctly, it will exit with status 0. + if !s.packRun { + _, err := s.Stdin.Write(pktline.FlushPkt) + return err + } + + return nil +} + +func (s *session) Close() (err error) { + err = s.finish() + + defer ioutil.CheckClose(s.Command, &err) + return +} + +func (s *session) checkNotFoundError() error { + t := time.NewTicker(time.Second * readErrorSecondsTimeout) + defer t.Stop() + + select { + case <-t.C: + return ErrTimeoutExceeded + case line, ok := <-s.firstErrLine: + if !ok { + return nil + } + + if isRepoNotFoundError(line) { + return transport.ErrRepositoryNotFound + } + + return fmt.Errorf("unknown error: %s", line) + } +} + +var ( + githubRepoNotFoundErr = "ERROR: Repository not found." + bitbucketRepoNotFoundErr = "conq: repository does not exist." + localRepoNotFoundErr = "does not appear to be a git repository" + gitProtocolNotFoundErr = "ERR \n Repository not found." + gitProtocolNoSuchErr = "ERR no such repository" + gitProtocolAccessDeniedErr = "ERR access denied" + gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access" +) + +func isRepoNotFoundError(s string) bool { + if strings.HasPrefix(s, githubRepoNotFoundErr) { + return true + } + + if strings.HasPrefix(s, bitbucketRepoNotFoundErr) { + return true + } + + if strings.HasSuffix(s, localRepoNotFoundErr) { + return true + } + + if strings.HasPrefix(s, gitProtocolNotFoundErr) { + return true + } + + if strings.HasPrefix(s, gitProtocolNoSuchErr) { + return true + } + + if strings.HasPrefix(s, gitProtocolAccessDeniedErr) { + return true + } + + if strings.HasPrefix(s, gogsAccessDeniedErr) { + return true + } + + return false +} + +var ( + nak = []byte("NAK") + eol = []byte("\n") +) + +// uploadPack implements the git-upload-pack protocol. 
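+// The wire sequence below is fixed: encode the upload-request, encode the +// haves (with a trailing flush), send "done", then close stdin so the server +// responds with the packfile.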
+func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error { + // TODO support multi_ack mode + // TODO support multi_ack_detailed mode + // TODO support acks for common objects + // TODO build a proper state machine for all these processing options + + if err := req.UploadRequest.Encode(w); err != nil { + return fmt.Errorf("sending upload-req message: %s", err) + } + + if err := req.UploadHaves.Encode(w, true); err != nil { + return fmt.Errorf("sending haves message: %s", err) + } + + if err := sendDone(w); err != nil { + return fmt.Errorf("sending done message: %s", err) + } + + if err := w.Close(); err != nil { + return fmt.Errorf("closing input: %s", err) + } + + return nil +} + +func sendDone(w io.Writer) error { + e := pktline.NewEncoder(w) + + return e.Encodef("done\n") +} + +// DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse +func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) ( + *packp.UploadPackResponse, error, +) { + res := packp.NewUploadPackResponse(req) + if err := res.Decode(r); err != nil { + return nil, fmt.Errorf("error decoding upload-pack response: %s", err) + } + + return res, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go new file mode 100644 index 0000000000..e2480848a4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go @@ -0,0 +1,73 @@ +package common + +import ( + "context" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// ServerCommand is used for a single server command execution. 
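+// +// A minimal sketch of serving a single upload-pack exchange over stdio (the +// session would come from a transport.UploadPackSession created elsewhere; os +// is assumed to be imported by the caller): +// +//	cmd := common.ServerCommand{Stderr: os.Stderr, Stdout: os.Stdout, Stdin: os.Stdin} +//	err := common.ServeUploadPack(cmd, session)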
+type ServerCommand struct { + Stderr io.Writer + Stdout io.WriteCloser + Stdin io.Reader +} + +func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) { + ioutil.CheckClose(cmd.Stdout, &err) + + ar, err := s.AdvertisedReferences() + if err != nil { + return err + } + + if err := ar.Encode(cmd.Stdout); err != nil { + return err + } + + req := packp.NewUploadPackRequest() + if err := req.Decode(cmd.Stdin); err != nil { + return err + } + + var resp *packp.UploadPackResponse + resp, err = s.UploadPack(context.TODO(), req) + if err != nil { + return err + } + + return resp.Encode(cmd.Stdout) +} + +func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error { + ar, err := s.AdvertisedReferences() + if err != nil { + return fmt.Errorf("internal error in advertised references: %s", err) + } + + if err := ar.Encode(cmd.Stdout); err != nil { + return fmt.Errorf("error in advertised references encoding: %s", err) + } + + req := packp.NewReferenceUpdateRequest() + if err := req.Decode(cmd.Stdin); err != nil { + return fmt.Errorf("error decoding: %s", err) + } + + rs, err := s.ReceivePack(context.TODO(), req) + if rs != nil { + if err := rs.Encode(cmd.Stdout); err != nil { + return fmt.Errorf("error in encoding report status %s", err) + } + } + + if err != nil { + return fmt.Errorf("error in receive pack: %s", err) + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go new file mode 100644 index 0000000000..e7e2b075e5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go @@ -0,0 +1,64 @@ +package server + +import ( + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/storage/filesystem" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/osfs" +) + +// DefaultLoader is a filesystem loader ignoring host and resolving paths to /. +var DefaultLoader = NewFilesystemLoader(osfs.New("")) + +// Loader loads repository's storer.Storer based on an optional host and a path. +type Loader interface { + // Load loads a storer.Storer given a transport.Endpoint. + // Returns transport.ErrRepositoryNotFound if the repository does not + // exist. + Load(ep *transport.Endpoint) (storer.Storer, error) +} + +type fsLoader struct { + base billy.Filesystem +} + +// NewFilesystemLoader creates a Loader that ignores host and resolves paths +// with a given base filesystem. +func NewFilesystemLoader(base billy.Filesystem) Loader { + return &fsLoader{base} +} + +// Load looks up the endpoint's path in the base file system and returns a +// storer for it. Returns transport.ErrRepositoryNotFound if a repository does +// not exist in the given path. +func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { + fs, err := l.base.Chroot(ep.Path) + if err != nil { + return nil, err + } + + if _, err := fs.Stat("config"); err != nil { + return nil, transport.ErrRepositoryNotFound + } + + return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil +} + +// MapLoader is a Loader that uses a lookup map of storer.Storer by +// transport.Endpoint. +type MapLoader map[string]storer.Storer + +// Load returns a storer.Storer for given a transport.Endpoint by looking it up +// in the map. Returns transport.ErrRepositoryNotFound if the endpoint does not +// exist. 
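+// +// A sketch of an in-memory server (the key must match the endpoint's +// String() form for the URL being served; memory.NewStorage is from +// storage/memory): +// +//	loader := server.MapLoader{"file:///repo.git": memory.NewStorage()} +//	srv := server.NewServer(loader)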
+func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { + s, ok := l[ep.String()] + if !ok { + return nil, transport.ErrRepositoryNotFound + } + + return s, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go new file mode 100644 index 0000000000..727f902150 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go @@ -0,0 +1,424 @@ +// Package server implements the git server protocol. For most use cases, the +// transport-specific implementations should be used. +package server + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" + "github.com/go-git/go-git/v5/plumbing/revlist" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var DefaultServer = NewServer(DefaultLoader) + +type server struct { + loader Loader + handler *handler +} + +// NewServer returns a transport.Transport implementing a git server, +// independent of transport. Each transport must wrap this. +func NewServer(loader Loader) transport.Transport { + return &server{ + loader, + &handler{asClient: false}, + } +} + +// NewClient returns a transport.Transport implementing a client with an +// embedded server. +func NewClient(loader Loader) transport.Transport { + return &server{ + loader, + &handler{asClient: true}, + } +} + +func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { + sto, err := s.loader.Load(ep) + if err != nil { + return nil, err + } + + return s.handler.NewUploadPackSession(sto) +} + +func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { + sto, err := s.loader.Load(ep) + if err != nil { + return nil, err + } + + return s.handler.NewReceivePackSession(sto) +} + +type handler struct { + asClient bool +} + +func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) { + return &upSession{ + session: session{storer: s, asClient: h.asClient}, + }, nil +} + +func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) { + return &rpSession{ + session: session{storer: s, asClient: h.asClient}, + cmdStatus: map[plumbing.ReferenceName]error{}, + }, nil +} + +type session struct { + storer storer.Storer + caps *capability.List + asClient bool +} + +func (s *session) Close() error { + return nil +} + +func (s *session) SetAuth(transport.AuthMethod) error { + //TODO: deprecate + return nil +} + +func (s *session) checkSupportedCapabilities(cl *capability.List) error { + for _, c := range cl.All() { + if !s.caps.Supports(c) { + return fmt.Errorf("unsupported capability: %s", c) + } + } + + return nil +} + +type upSession struct { + session +} + +func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { + ar := packp.NewAdvRefs() + + if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { + return nil, err + } + + s.caps = ar.Capabilities + + if err := setReferences(s.storer, ar); err != nil { + return nil, err + } + + if err := setHEAD(s.storer, ar); err != nil { + return nil, err + } + + if s.asClient && 
len(ar.References) == 0 { + return nil, transport.ErrEmptyRemoteRepository + } + + return ar, nil +} + +func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { + if req.IsEmpty() { + return nil, transport.ErrEmptyUploadPackRequest + } + + if err := req.Validate(); err != nil { + return nil, err + } + + if s.caps == nil { + s.caps = capability.NewList() + if err := s.setSupportedCapabilities(s.caps); err != nil { + return nil, err + } + } + + if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { + return nil, err + } + + s.caps = req.Capabilities + + if len(req.Shallows) > 0 { + return nil, fmt.Errorf("shallow not supported") + } + + objs, err := s.objectsToUpload(req) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + e := packfile.NewEncoder(pw, s.storer, false) + go func() { + // TODO: plumb through a pack window. + _, err := e.Encode(objs, 10) + pw.CloseWithError(err) + }() + + return packp.NewUploadPackResponseWithPackfile(req, + ioutil.NewContextReadCloser(ctx, pr), + ), nil +} + +func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) { + haves, err := revlist.Objects(s.storer, req.Haves, nil) + if err != nil { + return nil, err + } + + return revlist.Objects(s.storer, req.Wants, haves) +} + +func (*upSession) setSupportedCapabilities(c *capability.List) error { + if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { + return err + } + + if err := c.Set(capability.OFSDelta); err != nil { + return err + } + + return nil +} + +type rpSession struct { + session + cmdStatus map[plumbing.ReferenceName]error + firstErr error + unpackErr error +} + +func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { + ar := packp.NewAdvRefs() + + if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { + return nil, err + } + + s.caps = ar.Capabilities + + if err := setReferences(s.storer, ar); err != nil { + return nil, err + } + + if err := setHEAD(s.storer, ar); err != nil { + return nil, err + } + + return ar, nil +} + +var ( + ErrUpdateReference = errors.New("failed to update ref") +) + +func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { + if s.caps == nil { + s.caps = capability.NewList() + if err := s.setSupportedCapabilities(s.caps); err != nil { + return nil, err + } + } + + if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { + return nil, err + } + + s.caps = req.Capabilities + + //TODO: Implement 'atomic' update of references. 
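+ // Until then, commands are applied one at a time by updateReferences and + // per-reference failures are reported through the returned ReportStatus.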
+ + if req.Packfile != nil { + r := ioutil.NewContextReadCloser(ctx, req.Packfile) + if err := s.writePackfile(r); err != nil { + s.unpackErr = err + s.firstErr = err + return s.reportStatus(), err + } + } + + s.updateReferences(req) + return s.reportStatus(), s.firstErr +} + +func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) { + for _, cmd := range req.Commands { + exists, err := referenceExists(s.storer, cmd.Name) + if err != nil { + s.setStatus(cmd.Name, err) + continue + } + + switch cmd.Action() { + case packp.Create: + if exists { + s.setStatus(cmd.Name, ErrUpdateReference) + continue + } + + ref := plumbing.NewHashReference(cmd.Name, cmd.New) + err := s.storer.SetReference(ref) + s.setStatus(cmd.Name, err) + case packp.Delete: + if !exists { + s.setStatus(cmd.Name, ErrUpdateReference) + continue + } + + err := s.storer.RemoveReference(cmd.Name) + s.setStatus(cmd.Name, err) + case packp.Update: + if !exists { + s.setStatus(cmd.Name, ErrUpdateReference) + continue + } + + ref := plumbing.NewHashReference(cmd.Name, cmd.New) + err := s.storer.SetReference(ref) + s.setStatus(cmd.Name, err) + } + } +} + +func (s *rpSession) writePackfile(r io.ReadCloser) error { + if r == nil { + return nil + } + + if err := packfile.UpdateObjectStorage(s.storer, r); err != nil { + _ = r.Close() + return err + } + + return r.Close() +} + +func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) { + s.cmdStatus[ref] = err + if s.firstErr == nil && err != nil { + s.firstErr = err + } +} + +func (s *rpSession) reportStatus() *packp.ReportStatus { + if !s.caps.Supports(capability.ReportStatus) { + return nil + } + + rs := packp.NewReportStatus() + rs.UnpackStatus = "ok" + + if s.unpackErr != nil { + rs.UnpackStatus = s.unpackErr.Error() + } + + if s.cmdStatus == nil { + return rs + } + + for ref, err := range s.cmdStatus { + msg := "ok" + if err != nil { + msg = err.Error() + } + status := &packp.CommandStatus{ + ReferenceName: ref, + Status: msg, + } + rs.CommandStatuses = append(rs.CommandStatuses, status) + } + + return rs +} + +func (*rpSession) setSupportedCapabilities(c *capability.List) error { + if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { + return err + } + + if err := c.Set(capability.OFSDelta); err != nil { + return err + } + + if err := c.Set(capability.DeleteRefs); err != nil { + return err + } + + return c.Set(capability.ReportStatus) +} + +func setHEAD(s storer.Storer, ar *packp.AdvRefs) error { + ref, err := s.Reference(plumbing.HEAD) + if err == plumbing.ErrReferenceNotFound { + return nil + } + + if err != nil { + return err + } + + if ref.Type() == plumbing.SymbolicReference { + if err := ar.AddReference(ref); err != nil { + return nil + } + + ref, err = storer.ResolveReference(s, ref.Target()) + if err == plumbing.ErrReferenceNotFound { + return nil + } + + if err != nil { + return err + } + } + + if ref.Type() != plumbing.HashReference { + return plumbing.ErrInvalidType + } + + h := ref.Hash() + ar.Head = &h + + return nil +} + +func setReferences(s storer.Storer, ar *packp.AdvRefs) error { + //TODO: add peeled references. 
+ iter, err := s.IterReferences() + if err != nil { + return err + } + + return iter.ForEach(func(ref *plumbing.Reference) error { + if ref.Type() != plumbing.HashReference { + return nil + } + + ar.References[ref.Name().String()] = ref.Hash() + return nil + }) +} + +func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) { + _, err := s.Reference(n) + if err == plumbing.ErrReferenceNotFound { + return false, nil + } + + return err == nil, err +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go new file mode 100644 index 0000000000..b79a74e41d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go @@ -0,0 +1,322 @@ +package ssh + +import ( + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + "os/user" + "path/filepath" + + "github.com/go-git/go-git/v5/plumbing/transport" + + "github.com/mitchellh/go-homedir" + "github.com/xanzy/ssh-agent" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/knownhosts" +) + +const DefaultUsername = "git" + +// AuthMethod is the interface all auth methods for the ssh client +// must implement. The clientConfig method returns the ssh client +// configuration needed to establish an ssh connection. +type AuthMethod interface { + transport.AuthMethod + // ClientConfig should return a valid ssh.ClientConfig to be used to create + // a connection to the SSH server. + ClientConfig() (*ssh.ClientConfig, error) +} + +// The names of the AuthMethod implementations. To be returned by the +// Name() method. Most git servers only allow PublicKeysName and +// PublicKeysCallbackName. +const ( + KeyboardInteractiveName = "ssh-keyboard-interactive" + PasswordName = "ssh-password" + PasswordCallbackName = "ssh-password-callback" + PublicKeysName = "ssh-public-keys" + PublicKeysCallbackName = "ssh-public-key-callback" +) + +// KeyboardInteractive implements AuthMethod by using a +// prompt/response sequence controlled by the server. +type KeyboardInteractive struct { + User string + Challenge ssh.KeyboardInteractiveChallenge + HostKeyCallbackHelper +} + +func (a *KeyboardInteractive) Name() string { + return KeyboardInteractiveName +} + +func (a *KeyboardInteractive) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) { + return a.SetHostKeyCallback(&ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ + a.Challenge, + }, + }) +} + +// Password implements AuthMethod by using the given password. +type Password struct { + User string + Password string + HostKeyCallbackHelper +} + +func (a *Password) Name() string { + return PasswordName +} + +func (a *Password) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *Password) ClientConfig() (*ssh.ClientConfig, error) { + return a.SetHostKeyCallback(&ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.Password(a.Password)}, + }) +} + +// PasswordCallback implements AuthMethod by using a callback +// to fetch the password. 
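+// +// A sketch that defers the password lookup until the connection is made (the +// environment variable name is illustrative): +// +//	auth := &ssh.PasswordCallback{User: "git", Callback: func() (string, error) { +//		return os.Getenv("GIT_SSH_PASSWORD"), nil +//	}}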
+type PasswordCallback struct { + User string + Callback func() (pass string, err error) + HostKeyCallbackHelper +} + +func (a *PasswordCallback) Name() string { + return PasswordCallbackName +} + +func (a *PasswordCallback) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) { + return a.SetHostKeyCallback(&ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)}, + }) +} + +// PublicKeys implements AuthMethod by using the given key pairs. +type PublicKeys struct { + User string + Signer ssh.Signer + HostKeyCallbackHelper +} + +// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An +// encryption password should be given if the pemBytes contains a password +// encrypted PEM block otherwise password should be empty. It supports RSA +// (PKCS#1), DSA (OpenSSL), and ECDSA private keys. +func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("invalid PEM data") + } + if x509.IsEncryptedPEMBlock(block) { + key, err := x509.DecryptPEMBlock(block, []byte(password)) + if err != nil { + return nil, err + } + + block = &pem.Block{Type: block.Type, Bytes: key} + pemBytes = pem.EncodeToMemory(block) + } + + signer, err := ssh.ParsePrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return &PublicKeys{User: user, Signer: signer}, nil +} + +// NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM +// encoded private key. An encryption password should be given if the pemBytes +// contains a password encrypted PEM block otherwise password should be empty. +func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) { + bytes, err := ioutil.ReadFile(pemFile) + if err != nil { + return nil, err + } + + return NewPublicKeys(user, bytes, password) +} + +func (a *PublicKeys) Name() string { + return PublicKeysName +} + +func (a *PublicKeys) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) { + return a.SetHostKeyCallback(&ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}, + }) +} + +func username() (string, error) { + var username string + if user, err := user.Current(); err == nil { + username = user.Username + } else { + username = os.Getenv("USER") + } + + if username == "" { + return "", errors.New("failed to get username") + } + + return username, nil +} + +// PublicKeysCallback implements AuthMethod by asking a +// ssh.agent.Agent to act as a signer. +type PublicKeysCallback struct { + User string + Callback func() (signers []ssh.Signer, err error) + HostKeyCallbackHelper +} + +// NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens +// a pipe with the SSH agent and uses the pipe as the implementer of the public +// key callback function. 
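+// +// A minimal clone sketch assuming a running agent exposed via SSH_AUTH_SOCK +// (the repository URL is illustrative): +// +//	auth, err := ssh.NewSSHAgentAuth("git") +//	if err == nil { +//		_, err = git.PlainClone("/tmp/repo", false, &git.CloneOptions{ +//			URL:  "git@example.com:org/repo.git", +//			Auth: auth, +//		}) +//	}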
+func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) { + var err error + if u == "" { + u, err = username() + if err != nil { + return nil, err + } + } + + a, _, err := sshagent.New() + if err != nil { + return nil, fmt.Errorf("error creating SSH agent: %q", err) + } + + return &PublicKeysCallback{ + User: u, + Callback: a.Signers, + }, nil +} + +func (a *PublicKeysCallback) Name() string { + return PublicKeysCallbackName +} + +func (a *PublicKeysCallback) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) { + return a.SetHostKeyCallback(&ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)}, + }) +} + +// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a +// known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT +// +// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS +// environment variable, example: +// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file +// +// If SSH_KNOWN_HOSTS is not set the following file locations will be used: +// ~/.ssh/known_hosts +// /etc/ssh/ssh_known_hosts +func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) { + var err error + + if len(files) == 0 { + if files, err = getDefaultKnownHostsFiles(); err != nil { + return nil, err + } + } + + if files, err = filterKnownHostsFiles(files...); err != nil { + return nil, err + } + + return knownhosts.New(files...) +} + +func getDefaultKnownHostsFiles() ([]string, error) { + files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS")) + if len(files) != 0 { + return files, nil + } + + homeDirPath, err := homedir.Dir() + if err != nil { + return nil, err + } + + return []string{ + filepath.Join(homeDirPath, "/.ssh/known_hosts"), + "/etc/ssh/ssh_known_hosts", + }, nil +} + +func filterKnownHostsFiles(files ...string) ([]string, error) { + var out []string + for _, file := range files { + _, err := os.Stat(file) + if err == nil { + out = append(out, file) + continue + } + + if !os.IsNotExist(err) { + return nil, err + } + } + + if len(out) == 0 { + return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable") + } + + return out, nil +} + +// HostKeyCallbackHelper is a helper that provides common functionality to +// configure HostKeyCallback into a ssh.ClientConfig. +type HostKeyCallbackHelper struct { + // HostKeyCallback is the function type used for verifying server keys. + // If nil default callback will be create using NewKnownHostsCallback + // without argument. + HostKeyCallback ssh.HostKeyCallback +} + +// SetHostKeyCallback sets the field HostKeyCallback in the given cfg. If +// HostKeyCallback is empty a default callback is created using +// NewKnownHostsCallback. 
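+// +// To pin host-key verification to specific files instead, build the callback +// explicitly (a sketch; the path is illustrative and signer is an ssh.Signer +// obtained elsewhere): +// +//	cb, err := ssh.NewKnownHostsCallback("/etc/ssh/ssh_known_hosts") +//	if err == nil { +//		auth := &ssh.PublicKeys{User: "git", Signer: signer} +//		auth.HostKeyCallback = cb +//	}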
+func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) { + var err error + if m.HostKeyCallback == nil { + if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil { + return cfg, err + } + } + + cfg.HostKeyCallback = m.HostKeyCallback + return cfg, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go new file mode 100644 index 0000000000..c05ded986d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go @@ -0,0 +1,228 @@ +// Package ssh implements the SSH transport protocol. +package ssh + +import ( + "context" + "fmt" + "reflect" + "strconv" + + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/plumbing/transport/internal/common" + + "github.com/kevinburke/ssh_config" + "golang.org/x/crypto/ssh" + "golang.org/x/net/proxy" +) + +// DefaultClient is the default SSH client. +var DefaultClient = NewClient(nil) + +// DefaultSSHConfig is the reader used to access parameters stored in the +// system's ssh_config files. If nil all the ssh_config are ignored. +var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings + +type sshConfig interface { + Get(alias, key string) string +} + +// NewClient creates a new SSH client with an optional *ssh.ClientConfig. +func NewClient(config *ssh.ClientConfig) transport.Transport { + return common.NewClient(&runner{config: config}) +} + +// DefaultAuthBuilder is the function used to create a default AuthMethod, when +// the user doesn't provide any. +var DefaultAuthBuilder = func(user string) (AuthMethod, error) { + return NewSSHAgentAuth(user) +} + +const DefaultPort = 22 + +type runner struct { + config *ssh.ClientConfig +} + +func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { + c := &command{command: cmd, endpoint: ep, config: r.config} + if auth != nil { + c.setAuth(auth) + } + + if err := c.connect(); err != nil { + return nil, err + } + return c, nil +} + +type command struct { + *ssh.Session + connected bool + command string + endpoint *transport.Endpoint + client *ssh.Client + auth AuthMethod + config *ssh.ClientConfig +} + +func (c *command) setAuth(auth transport.AuthMethod) error { + a, ok := auth.(AuthMethod) + if !ok { + return transport.ErrInvalidAuthMethod + } + + c.auth = a + return nil +} + +func (c *command) Start() error { + return c.Session.Start(endpointToCommand(c.command, c.endpoint)) +} + +// Close closes the SSH session and connection. +func (c *command) Close() error { + if !c.connected { + return nil + } + + c.connected = false + + //XXX: If did read the full packfile, then the session might be already + // closed. + _ = c.Session.Close() + + return c.client.Close() +} + +// connect connects to the SSH server, unless a AuthMethod was set with +// SetAuth method, by default uses an auth method based on PublicKeysCallback, +// it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK +// environment var. 
+func (c *command) connect() error { + if c.connected { + return transport.ErrAlreadyConnected + } + + if c.auth == nil { + if err := c.setAuthFromEndpoint(); err != nil { + return err + } + } + + var err error + config, err := c.auth.ClientConfig() + if err != nil { + return err + } + + overrideConfig(c.config, config) + + c.client, err = dial("tcp", c.getHostWithPort(), config) + if err != nil { + return err + } + + c.Session, err = c.client.NewSession() + if err != nil { + _ = c.client.Close() + return err + } + + c.connected = true + return nil +} + +func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { + var ( + ctx = context.Background() + cancel context.CancelFunc + ) + if config.Timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, config.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + conn, err := proxy.Dial(ctx, network, addr) + if err != nil { + return nil, err + } + c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return ssh.NewClient(c, chans, reqs), nil +} + +func (c *command) getHostWithPort() string { + if addr, found := c.doGetHostWithPortFromSSHConfig(); found { + return addr + } + + host := c.endpoint.Host + port := c.endpoint.Port + if port <= 0 { + port = DefaultPort + } + + return fmt.Sprintf("%s:%d", host, port) +} + +func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { + if DefaultSSHConfig == nil { + return + } + + host := c.endpoint.Host + port := c.endpoint.Port + + configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname") + if configHost != "" { + host = configHost + found = true + } + + if !found { + return + } + + configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port") + if configPort != "" { + if i, err := strconv.Atoi(configPort); err == nil { + port = i + } + } + + addr = fmt.Sprintf("%s:%d", host, port) + return +} + +func (c *command) setAuthFromEndpoint() error { + var err error + c.auth, err = DefaultAuthBuilder(c.endpoint.User) + return err +} + +func endpointToCommand(cmd string, ep *transport.Endpoint) string { + return fmt.Sprintf("%s '%s'", cmd, ep.Path) +} + +func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) { + if overrides == nil { + return + } + + t := reflect.TypeOf(*c) + vc := reflect.ValueOf(c).Elem() + vo := reflect.ValueOf(overrides).Elem() + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + vcf := vc.FieldByName(f.Name) + vof := vo.FieldByName(f.Name) + vcf.Set(vof) + } + + *c = vc.Interface().(ssh.ClientConfig) +} diff --git a/vendor/github.com/go-git/go-git/v5/prune.go b/vendor/github.com/go-git/go-git/v5/prune.go new file mode 100644 index 0000000000..cc5907a142 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/prune.go @@ -0,0 +1,66 @@ +package git + +import ( + "errors" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type PruneHandler func(unreferencedObjectHash plumbing.Hash) error +type PruneOptions struct { + // OnlyObjectsOlderThan if set to non-zero value + // selects only objects older than the time provided. + OnlyObjectsOlderThan time.Time + // Handler is called on matching objects + Handler PruneHandler +} + +var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported") + +// DeleteObject deletes an object from a repository. +// The type conveniently matches PruneHandler. 
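+// +// That makes it usable directly as the pruning handler, for example: +// +//	err := r.Prune(git.PruneOptions{ +//		OnlyObjectsOlderThan: time.Now().AddDate(0, 0, -14), +//		Handler:              r.DeleteObject, +//	})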
+func (r *Repository) DeleteObject(hash plumbing.Hash) error { + los, ok := r.Storer.(storer.LooseObjectStorer) + if !ok { + return ErrLooseObjectsNotSupported + } + + return los.DeleteLooseObject(hash) +} + +func (r *Repository) Prune(opt PruneOptions) error { + los, ok := r.Storer.(storer.LooseObjectStorer) + if !ok { + return ErrLooseObjectsNotSupported + } + + pw := newObjectWalker(r.Storer) + err := pw.walkAllRefs() + if err != nil { + return err + } + // Now walk all (loose) objects in storage. + return los.ForEachObjectHash(func(hash plumbing.Hash) error { + // Get out if we have seen this object. + if pw.isSeen(hash) { + return nil + } + // Otherwise it is a candidate for pruning. + // Next, check whether the object is too new to prune. + if !opt.OnlyObjectsOlderThan.IsZero() { + // Errors here are non-fatal; the object may be packed or + // concurrently deleted. Skip such objects. + t, err := los.LooseObjectTime(hash) + if err != nil { + return nil + } + // Skip too new objects. + if !t.Before(opt.OnlyObjectsOlderThan) { + return nil + } + } + return opt.Handler(hash) + }) +} diff --git a/vendor/github.com/go-git/go-git/v5/references.go b/vendor/github.com/go-git/go-git/v5/references.go new file mode 100644 index 0000000000..6d96035afa --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/references.go @@ -0,0 +1,264 @@ +package git + +import ( + "io" + "sort" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/utils/diff" + + "github.com/sergi/go-diff/diffmatchpatch" +) + +// References returns a slice of Commits for the file at "path", starting from +// the provided commit, which must contain the file at that path. The last +// commit in the returned slice is the commit where the file was created. +// If the provided commit does not contain the specified path, a nil slice is +// returned. The commits are sorted in commit order, newer to older. +// +// Caveats: +// +// - Moves and copies are not currently supported. +// +// - Cherry-picks are not detected unless there are no commits between them and +// therefore can appear repeated in the list. (see git patch-id for hints on how +// to fix this). +func references(c *object.Commit, path string) ([]*object.Commit, error) { + var result []*object.Commit + seen := make(map[plumbing.Hash]struct{}) + if err := walkGraph(&result, &seen, c, path); err != nil { + return nil, err + } + + // TODO result should be returned without ordering + sortCommits(result) + + // for merges of identical cherry-picks + return removeComp(path, result, equivalent) +} + +type commitSorterer struct { + l []*object.Commit +} + +func (s commitSorterer) Len() int { + return len(s.l) +} + +func (s commitSorterer) Less(i, j int) bool { + return s.l[i].Committer.When.Before(s.l[j].Committer.When) || + s.l[i].Committer.When.Equal(s.l[j].Committer.When) && + s.l[i].Author.When.Before(s.l[j].Author.When) +} + +func (s commitSorterer) Swap(i, j int) { + s.l[i], s.l[j] = s.l[j], s.l[i] +} + +// SortCommits sorts a commit list by commit date, from older to newer. +func sortCommits(l []*object.Commit) { + s := &commitSorterer{l} + sort.Sort(s) +} + +// Recursive traversal of the commit graph, generating a linear history of the +// path.
+func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { + // check and update seen + if _, ok := (*seen)[current.Hash]; ok { + return nil + } + (*seen)[current.Hash] = struct{}{} + + // if the path is not in the current commit, stop searching. + if _, err := current.File(path); err != nil { + return nil + } + + // optimization: don't traverse branches that do not + // contain the path. + parents, err := parentsContainingPath(path, current) + if err != nil { + return err + } + switch len(parents) { + // if the path is not found in any of its parents, the path was + // created by this commit; we must add it to the revisions list and + // stop searching. This includes the case when current is the + // initial commit. + case 0: + *result = append(*result, current) + return nil + case 1: // only one parent contains the path + // if the file contents have changed, add the current commit + different, err := differentContents(path, current, parents) + if err != nil { + return err + } + if len(different) == 1 { + *result = append(*result, current) + } + // in any case, walk the parent + return walkGraph(result, seen, parents[0], path) + default: // more than one parent contains the path + // TODO: detect merges that had a conflict, because they must be + // included in the result here. + for _, p := range parents { + err := walkGraph(result, seen, p, path) + if err != nil { + return err + } + } + } + return nil +} + +func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) { + // TODO: benchmark this method making git.object.Commit.parent public instead of using + // an iterator + var result []*object.Commit + iter := c.Parents() + for { + parent, err := iter.Next() + if err == io.EOF { + return result, nil + } + if err != nil { + return nil, err + } + if _, err := parent.File(path); err == nil { + result = append(result, parent) + } + } +} + +// Returns a slice of the commits in "cs" that have the file "path", but with +// different contents than what can be found in "c". +func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { + result := make([]*object.Commit, 0, len(cs)) + h, found := blobHash(path, c) + if !found { + return nil, object.ErrFileNotFound + } + for _, cx := range cs { + if hx, found := blobHash(path, cx); found && h != hx { + result = append(result, cx) + } + } + return result, nil +} + +// blobHash returns the hash of a path in a commit +func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { + file, err := commit.File(path) + if err != nil { + var empty plumbing.Hash + return empty, found + } + return file.Hash, true +} + +type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) + +// Returns a new slice of commits, with duplicates removed. Expects a +// sorted commit list. Duplication is defined according to "comp". It +// will always keep the first commit of a series of duplicated commits. +func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { + result := make([]*object.Commit, 0, len(cs)) + if len(cs) == 0 { + return result, nil + } + result = append(result, cs[0]) + for i := 1; i < len(cs); i++ { + equals, err := comp(path, cs[i], cs[i-1]) + if err != nil { + return nil, err + } + if !equals { + result = append(result, cs[i]) + } + } + return result, nil +} + +// Equivalent commits are commits whose patch is the same.
+func equivalent(path string, a, b *object.Commit) (bool, error) { + numParentsA := a.NumParents() + numParentsB := b.NumParents() + + // the first commit is not equivalent to anyone + // and "I think" merges can not be equivalent to anything + if numParentsA != 1 || numParentsB != 1 { + return false, nil + } + + diffsA, err := patch(a, path) + if err != nil { + return false, err + } + diffsB, err := patch(b, path) + if err != nil { + return false, err + } + + return sameDiffs(diffsA, diffsB), nil +} + +func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { + // get contents of the file in the commit + file, err := c.File(path) + if err != nil { + return nil, err + } + content, err := file.Contents() + if err != nil { + return nil, err + } + + // get contents of the file in the first parent of the commit + var contentParent string + iter := c.Parents() + parent, err := iter.Next() + if err != nil { + return nil, err + } + file, err = parent.File(path) + if err != nil { + contentParent = "" + } else { + contentParent, err = file.Contents() + if err != nil { + return nil, err + } + } + + // compare the contents of parent and child + return diff.Do(content, contentParent), nil +} + +func sameDiffs(a, b []diffmatchpatch.Diff) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !sameDiff(a[i], b[i]) { + return false + } + } + return true +} + +func sameDiff(a, b diffmatchpatch.Diff) bool { + if a.Type != b.Type { + return false + } + switch a.Type { + case 0: + return countLines(a.Text) == countLines(b.Text) + case 1, -1: + return a.Text == b.Text + default: + panic("unreachable") + } +} diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go new file mode 100644 index 0000000000..d88e8e6ff6 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -0,0 +1,1166 @@ +package git + +import ( + "context" + "errors" + "fmt" + "io" + + "github.com/go-git/go-billy/v5/osfs" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/plumbing/protocol/packp" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" + "github.com/go-git/go-git/v5/plumbing/revlist" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/plumbing/transport/client" + "github.com/go-git/go-git/v5/storage" + "github.com/go-git/go-git/v5/storage/filesystem" + "github.com/go-git/go-git/v5/storage/memory" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var ( + NoErrAlreadyUpToDate = errors.New("already up-to-date") + ErrDeleteRefNotSupported = errors.New("server does not support delete-refs") + ErrForceNeeded = errors.New("some refs were not updated") + ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec") +) + +type NoMatchingRefSpecError struct { + refSpec config.RefSpec +} + +func (e NoMatchingRefSpecError) Error() string { + return fmt.Sprintf("couldn't find remote ref %q", e.refSpec.Src()) +} + +func (e NoMatchingRefSpecError) Is(target error) bool { + _, ok := target.(NoMatchingRefSpecError) + return ok +} + +const ( + // This describes the maximum number of commits to walk when + // computing the haves to send to a server, for each ref in the + 
// repo containing this remote, when not using the multi-ack + // protocol. Setting this to 0 means there is no limit. + maxHavesToVisitPerRef = 100 +) + +// Remote represents a connection to a remote repository. +type Remote struct { + c *config.RemoteConfig + s storage.Storer +} + +// NewRemote creates a new Remote. +// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote). +// Otherwise Remotes should be created via the use of a Repository. +func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote { + return &Remote{s: s, c: c} +} + +// Config returns the RemoteConfig object used to instantiate this Remote. +func (r *Remote) Config() *config.RemoteConfig { + return r.c +} + +func (r *Remote) String() string { + var fetch, push string + if len(r.c.URLs) > 0 { + fetch = r.c.URLs[0] + push = r.c.URLs[0] + } + + return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push) +} + +// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the +// remote was already up-to-date. +func (r *Remote) Push(o *PushOptions) error { + return r.PushContext(context.Background(), o) +} + +// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if +// the remote was already up-to-date. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { + if err := o.Validate(); err != nil { + return err + } + + if o.RemoteName != r.c.Name { + return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) + } + + s, err := newSendPackSession(r.c.URLs[0], o.Auth) + if err != nil { + return err + } + + defer ioutil.CheckClose(s, &err) + + ar, err := s.AdvertisedReferences() + if err != nil { + return err + } + + remoteRefs, err := ar.AllReferences() + if err != nil { + return err + } + + isDelete := false + allDelete := true + for _, rs := range o.RefSpecs { + if rs.IsDelete() { + isDelete = true + } else { + allDelete = false + } + if isDelete && !allDelete { + break + } + } + + if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) { + return ErrDeleteRefNotSupported + } + + if o.Force { + for i := 0; i < len(o.RefSpecs); i++ { + rs := &o.RefSpecs[i] + if !rs.IsForceUpdate() && !rs.IsDelete() { + o.RefSpecs[i] = config.RefSpec("+" + rs.String()) + } + } + } + + localRefs, err := r.references() + if err != nil { + return err + } + + req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar) + if err != nil { + return err + } + + if len(req.Commands) == 0 { + return NoErrAlreadyUpToDate + } + + objects := objectsToPush(req.Commands) + + haves, err := referencesToHashes(remoteRefs) + if err != nil { + return err + } + + stop, err := r.s.Shallow() + if err != nil { + return err + } + + // if we have shallow we should include this as part of the objects that + // we are aware. + haves = append(haves, stop...) + + var hashesToPush []plumbing.Hash + // Avoid the expensive revlist operation if we're only doing deletes. + if !allDelete { + if r.c.IsFirstURLLocal() { + // If we're are pushing to a local repo, it might be much + // faster to use a local storage layer to get the commits + // to ignore, when calculating the object revlist. 
+ localStorer := filesystem.NewStorage( + osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault()) + hashesToPush, err = revlist.ObjectsWithStorageForIgnores( + r.s, localStorer, objects, haves) + } else { + hashesToPush, err = revlist.Objects(r.s, objects, haves) + } + if err != nil { + return err + } + } + + if len(hashesToPush) == 0 { + allDelete = true + for _, command := range req.Commands { + if command.Action() != packp.Delete { + allDelete = false + break + } + } + } + + rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete) + if err != nil { + return err + } + + if err = rs.Error(); err != nil { + return err + } + + return r.updateRemoteReferenceStorage(req, rs) +} + +func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { + return !ar.Capabilities.Supports(capability.OFSDelta) +} + +func (r *Remote) newReferenceUpdateRequest( + o *PushOptions, + localRefs []*plumbing.Reference, + remoteRefs storer.ReferenceStorer, + ar *packp.AdvRefs, +) (*packp.ReferenceUpdateRequest, error) { + req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities) + + if o.Progress != nil { + req.Progress = o.Progress + if ar.Capabilities.Supports(capability.Sideband64k) { + _ = req.Capabilities.Set(capability.Sideband64k) + } else if ar.Capabilities.Supports(capability.Sideband) { + _ = req.Capabilities.Set(capability.Sideband) + } + } + + if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil { + return nil, err + } + + return req, nil +} + +func (r *Remote) updateRemoteReferenceStorage( + req *packp.ReferenceUpdateRequest, + result *packp.ReportStatus, +) error { + + for _, spec := range r.c.Fetch { + for _, c := range req.Commands { + if !spec.Match(c.Name) { + continue + } + + local := spec.Dst(c.Name) + ref := plumbing.NewHashReference(local, c.New) + switch c.Action() { + case packp.Create, packp.Update: + if err := r.s.SetReference(ref); err != nil { + return err + } + case packp.Delete: + if err := r.s.RemoveReference(local); err != nil { + return err + } + } + } + } + + return nil +} + +// FetchContext fetches references along with the objects necessary to complete +// their histories. +// +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { + _, err := r.fetch(ctx, o) + return err +} + +// Fetch fetches references along with the objects necessary to complete their +// histories. +// +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. 
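+// A minimal usage sketch (illustrative only, not part of the library; the
+// *Remote value and the refspec below are hypothetical):
+//
+//	err := remote.Fetch(&FetchOptions{
+//		RefSpecs: []config.RefSpec{"+refs/heads/*:refs/remotes/origin/*"},
+//	})
+//	if err == NoErrAlreadyUpToDate {
+//		// nothing new to fetch
+//	} else if err != nil {
+//		// handle the error
+//	}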
+func (r *Remote) Fetch(o *FetchOptions) error { + return r.FetchContext(context.Background(), o) +} + +func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) { + if o.RemoteName == "" { + o.RemoteName = r.c.Name + } + + if err = o.Validate(); err != nil { + return nil, err + } + + if len(o.RefSpecs) == 0 { + o.RefSpecs = r.c.Fetch + } + + s, err := newUploadPackSession(r.c.URLs[0], o.Auth) + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(s, &err) + + ar, err := s.AdvertisedReferences() + if err != nil { + return nil, err + } + + req, err := r.newUploadPackRequest(o, ar) + if err != nil { + return nil, err + } + + if err := r.isSupportedRefSpec(o.RefSpecs, ar); err != nil { + return nil, err + } + + remoteRefs, err := ar.AllReferences() + if err != nil { + return nil, err + } + + localRefs, err := r.references() + if err != nil { + return nil, err + } + + refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) + if err != nil { + return nil, err + } + + req.Wants, err = getWants(r.s, refs) + if len(req.Wants) > 0 { + req.Haves, err = getHaves(localRefs, remoteRefs, r.s) + if err != nil { + return nil, err + } + + if err = r.fetchPack(ctx, o, s, req); err != nil { + return nil, err + } + } + + updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force) + if err != nil { + return nil, err + } + + if !updated { + return remoteRefs, NoErrAlreadyUpToDate + } + + return remoteRefs, nil +} + +func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) { + c, ep, err := newClient(url) + if err != nil { + return nil, err + } + + return c.NewUploadPackSession(ep, auth) +} + +func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) { + c, ep, err := newClient(url) + if err != nil { + return nil, err + } + + return c.NewReceivePackSession(ep, auth) +} + +func newClient(url string) (transport.Transport, *transport.Endpoint, error) { + ep, err := transport.NewEndpoint(url) + if err != nil { + return nil, nil, err + } + + c, err := client.NewClient(ep) + if err != nil { + return nil, nil, err + } + + return c, ep, err +} + +func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession, + req *packp.UploadPackRequest) (err error) { + + reader, err := s.UploadPack(ctx, req) + if err != nil { + return err + } + + defer ioutil.CheckClose(reader, &err) + + if err = r.updateShallow(o, reader); err != nil { + return err + } + + if err = packfile.UpdateObjectStorage(r.s, + buildSidebandIfSupported(req.Capabilities, reader, o.Progress), + ); err != nil { + return err + } + + return err +} + +func (r *Remote) addReferencesToUpdate( + refspecs []config.RefSpec, + localRefs []*plumbing.Reference, + remoteRefs storer.ReferenceStorer, + req *packp.ReferenceUpdateRequest, + prune bool, +) error { + // This references dictionary will be used to search references by name. 
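+	// Building the map up front makes the non-wildcard lookups below O(1)
+	// instead of a linear scan over localRefs for every refspec.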
+	refsDict := make(map[string]*plumbing.Reference)
+	for _, ref := range localRefs {
+		refsDict[ref.Name().String()] = ref
+	}
+
+	for _, rs := range refspecs {
+		if rs.IsDelete() {
+			if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil {
+				return err
+			}
+		} else {
+			err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
+			if err != nil {
+				return err
+			}
+
+			if prune {
+				if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *Remote) addOrUpdateReferences(
+	rs config.RefSpec,
+	localRefs []*plumbing.Reference,
+	refsDict map[string]*plumbing.Reference,
+	remoteRefs storer.ReferenceStorer,
+	req *packp.ReferenceUpdateRequest,
+) error {
+	// If it is not a wildcard refspec we can directly search for the reference
+	// in the references dictionary.
+	if !rs.IsWildcard() {
+		ref, ok := refsDict[rs.Src()]
+		if !ok {
+			return nil
+		}
+
+		return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
+	}
+
+	for _, ref := range localRefs {
+		err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *Remote) deleteReferences(rs config.RefSpec,
+	remoteRefs storer.ReferenceStorer,
+	refsDict map[string]*plumbing.Reference,
+	req *packp.ReferenceUpdateRequest,
+	prune bool) error {
+	iter, err := remoteRefs.IterReferences()
+	if err != nil {
+		return err
+	}
+
+	return iter.ForEach(func(ref *plumbing.Reference) error {
+		if ref.Type() != plumbing.HashReference {
+			return nil
+		}
+
+		if prune {
+			rs := rs.Reverse()
+			if !rs.Match(ref.Name()) {
+				return nil
+			}
+
+			if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok {
+				return nil
+			}
+		} else if rs.Dst("") != ref.Name() {
+			return nil
+		}
+
+		cmd := &packp.Command{
+			Name: ref.Name(),
+			Old:  ref.Hash(),
+			New:  plumbing.ZeroHash,
+		}
+		req.Commands = append(req.Commands, cmd)
+		return nil
+	})
+}
+
+func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
+	remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
+	req *packp.ReferenceUpdateRequest) error {
+
+	if localRef.Type() != plumbing.HashReference {
+		return nil
+	}
+
+	if !rs.Match(localRef.Name()) {
+		return nil
+	}
+
+	cmd := &packp.Command{
+		Name: rs.Dst(localRef.Name()),
+		Old:  plumbing.ZeroHash,
+		New:  localRef.Hash(),
+	}
+
+	remoteRef, err := remoteRefs.Reference(cmd.Name)
+	if err == nil {
+		if remoteRef.Type() != plumbing.HashReference {
+			// TODO: check actual git behavior here
+			return nil
+		}
+
+		cmd.Old = remoteRef.Hash()
+	} else if err != plumbing.ErrReferenceNotFound {
+		return err
+	}
+
+	if cmd.Old == cmd.New {
+		return nil
+	}
+
+	if !rs.IsForceUpdate() {
+		if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
+			return err
+		}
+	}
+
+	req.Commands = append(req.Commands, cmd)
+	return nil
+}
+
+func (r *Remote) references() ([]*plumbing.Reference, error) {
+	var localRefs []*plumbing.Reference
+
+	iter, err := r.s.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+
+	for {
+		ref, err := iter.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		localRefs = append(localRefs, ref)
+	}
+
+	return localRefs, nil
+}
+
+func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) (
+	map[plumbing.Hash]bool, error) {
+	remoteRefs := map[plumbing.Hash]bool{}
+	iter, err := remoteRefStorer.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+	err = iter.ForEach(func(ref *plumbing.Reference) error {
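+		// Only hash references point directly at an object; symbolic
+		// references (e.g. HEAD) are skipped here.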
+ if ref.Type() != plumbing.HashReference { + return nil + } + remoteRefs[ref.Hash()] = true + return nil + }) + if err != nil { + return nil, err + } + return remoteRefs, nil +} + +// getHavesFromRef populates the given `haves` map with the given +// reference, and up to `maxHavesToVisitPerRef` ancestor commits. +func getHavesFromRef( + ref *plumbing.Reference, + remoteRefs map[plumbing.Hash]bool, + s storage.Storer, + haves map[plumbing.Hash]bool, +) error { + h := ref.Hash() + if haves[h] { + return nil + } + + // No need to load the commit if we know the remote already + // has this hash. + if remoteRefs[h] { + haves[h] = true + return nil + } + + commit, err := object.GetCommit(s, h) + if err != nil { + // Ignore the error if this isn't a commit. + haves[ref.Hash()] = true + return nil + } + + // Until go-git supports proper commit negotiation during an + // upload pack request, include up to `maxHavesToVisitPerRef` + // commits from the history of each ref. + walker := object.NewCommitPreorderIter(commit, haves, nil) + toVisit := maxHavesToVisitPerRef + return walker.ForEach(func(c *object.Commit) error { + haves[c.Hash] = true + toVisit-- + // If toVisit starts out at 0 (indicating there is no + // max), then it will be negative here and we won't stop + // early. + if toVisit == 0 || remoteRefs[c.Hash] { + return storer.ErrStop + } + return nil + }) +} + +func getHaves( + localRefs []*plumbing.Reference, + remoteRefStorer storer.ReferenceStorer, + s storage.Storer, +) ([]plumbing.Hash, error) { + haves := map[plumbing.Hash]bool{} + + // Build a map of all the remote references, to avoid loading too + // many parent commits for references we know don't need to be + // transferred. + remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer) + if err != nil { + return nil, err + } + + for _, ref := range localRefs { + if haves[ref.Hash()] { + continue + } + + if ref.Type() != plumbing.HashReference { + continue + } + + err = getHavesFromRef(ref, remoteRefs, s, haves) + if err != nil { + return nil, err + } + } + + var result []plumbing.Hash + for h := range haves { + result = append(result, h) + } + + return result, nil +} + +const refspecAllTags = "+refs/tags/*:refs/tags/*" + +func calculateRefs( + spec []config.RefSpec, + remoteRefs storer.ReferenceStorer, + tagMode TagMode, +) (memory.ReferenceStorage, error) { + if tagMode == AllTags { + spec = append(spec, refspecAllTags) + } + + refs := make(memory.ReferenceStorage) + for _, s := range spec { + if err := doCalculateRefs(s, remoteRefs, refs); err != nil { + return nil, err + } + } + + return refs, nil +} + +func doCalculateRefs( + s config.RefSpec, + remoteRefs storer.ReferenceStorer, + refs memory.ReferenceStorage, +) error { + iter, err := remoteRefs.IterReferences() + if err != nil { + return err + } + + if s.IsExactSHA1() { + ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src())) + return refs.SetReference(ref) + } + + var matched bool + err = iter.ForEach(func(ref *plumbing.Reference) error { + if !s.Match(ref.Name()) { + return nil + } + + if ref.Type() == plumbing.SymbolicReference { + target, err := storer.ResolveReference(remoteRefs, ref.Name()) + if err != nil { + return err + } + + ref = plumbing.NewHashReference(ref.Name(), target.Hash()) + } + + if ref.Type() != plumbing.HashReference { + return nil + } + + matched = true + if err := refs.SetReference(ref); err != nil { + return err + } + + if !s.IsWildcard() { + return storer.ErrStop + } + + return nil + }) + + if !matched && !s.IsWildcard() { + 
return NoMatchingRefSpecError{refSpec: s} + } + + return err +} + +func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { + wants := map[plumbing.Hash]bool{} + for _, ref := range refs { + hash := ref.Hash() + exists, err := objectExists(localStorer, ref.Hash()) + if err != nil { + return nil, err + } + + if !exists { + wants[hash] = true + } + } + + var result []plumbing.Hash + for h := range wants { + result = append(result, h) + } + + return result, nil +} + +func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) { + _, err := s.EncodedObject(plumbing.AnyObject, h) + if err == plumbing.ErrObjectNotFound { + return false, nil + } + + return true, err +} + +func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error { + if cmd.Old == plumbing.ZeroHash { + _, err := remoteRefs.Reference(cmd.Name) + if err == plumbing.ErrReferenceNotFound { + return nil + } + + if err != nil { + return err + } + + return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) + } + + ff, err := isFastForward(s, cmd.Old, cmd.New) + if err != nil { + return err + } + + if !ff { + return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) + } + + return nil +} + +func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) { + c, err := object.GetCommit(s, new) + if err != nil { + return false, err + } + + found := false + iter := object.NewCommitPreorderIter(c, nil, nil) + err = iter.ForEach(func(c *object.Commit) error { + if c.Hash != old { + return nil + } + + found = true + return storer.ErrStop + }) + return found, err +} + +func (r *Remote) newUploadPackRequest(o *FetchOptions, + ar *packp.AdvRefs) (*packp.UploadPackRequest, error) { + + req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities) + + if o.Depth != 0 { + req.Depth = packp.DepthCommits(o.Depth) + if err := req.Capabilities.Set(capability.Shallow); err != nil { + return nil, err + } + } + + if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) { + if err := req.Capabilities.Set(capability.NoProgress); err != nil { + return nil, err + } + } + + isWildcard := true + for _, s := range o.RefSpecs { + if !s.IsWildcard() { + isWildcard = false + break + } + } + + if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) { + if err := req.Capabilities.Set(capability.IncludeTag); err != nil { + return nil, err + } + } + + return req, nil +} + +func (r *Remote) isSupportedRefSpec(refs []config.RefSpec, ar *packp.AdvRefs) error { + var containsIsExact bool + for _, ref := range refs { + if ref.IsExactSHA1() { + containsIsExact = true + } + } + + if !containsIsExact { + return nil + } + + if ar.Capabilities.Supports(capability.AllowReachableSHA1InWant) || + ar.Capabilities.Supports(capability.AllowTipSHA1InWant) { + return nil + } + + return ErrExactSHA1NotSupported +} + +func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader { + var t sideband.Type + + switch { + case l.Supports(capability.Sideband): + t = sideband.Sideband + case l.Supports(capability.Sideband64k): + t = sideband.Sideband64k + default: + return reader + } + + d := sideband.NewDemuxer(t, reader) + d.Progress = p + + return d +} + +func (r *Remote) updateLocalReferenceStorage( + specs []config.RefSpec, + fetchedRefs, remoteRefs memory.ReferenceStorage, + tagMode TagMode, + force bool, +) (updated bool, err error) { + 
isWildcard := true + forceNeeded := false + + for _, spec := range specs { + if !spec.IsWildcard() { + isWildcard = false + } + + for _, ref := range fetchedRefs { + if !spec.Match(ref.Name()) && !spec.IsExactSHA1() { + continue + } + + if ref.Type() != plumbing.HashReference { + continue + } + + localName := spec.Dst(ref.Name()) + old, _ := storer.ResolveReference(r.s, localName) + new := plumbing.NewHashReference(localName, ref.Hash()) + + // If the ref exists locally as a branch and force is not specified, + // only update if the new ref is an ancestor of the old + if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() { + ff, err := isFastForward(r.s, old.Hash(), new.Hash()) + if err != nil { + return updated, err + } + + if !ff { + forceNeeded = true + continue + } + } + + refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old) + if err != nil { + return updated, err + } + + if refUpdated { + updated = true + } + } + } + + if tagMode == NoTags { + return updated, nil + } + + tags := fetchedRefs + if isWildcard { + tags = remoteRefs + } + tagUpdated, err := r.buildFetchedTags(tags) + if err != nil { + return updated, err + } + + if tagUpdated { + updated = true + } + + if forceNeeded { + err = ErrForceNeeded + } + + return +} + +func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) { + for _, ref := range refs { + if !ref.Name().IsTag() { + continue + } + + _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash()) + if err == plumbing.ErrObjectNotFound { + continue + } + + if err != nil { + return false, err + } + + refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref) + if err != nil { + return updated, err + } + + if refUpdated { + updated = true + } + } + + return +} + +// List the references on the remote repository. 
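+// A rough usage sketch, in the spirit of `git ls-remote` (the remote value is
+// assumed to be a previously created *Remote; auth is omitted):
+//
+//	refs, err := remote.List(&ListOptions{})
+//	if err == nil {
+//		for _, ref := range refs {
+//			fmt.Println(ref)
+//		}
+//	}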
+func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { + s, err := newUploadPackSession(r.c.URLs[0], o.Auth) + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(s, &err) + + ar, err := s.AdvertisedReferences() + if err != nil { + return nil, err + } + + allRefs, err := ar.AllReferences() + if err != nil { + return nil, err + } + + refs, err := allRefs.IterReferences() + if err != nil { + return nil, err + } + + var resultRefs []*plumbing.Reference + err = refs.ForEach(func(ref *plumbing.Reference) error { + resultRefs = append(resultRefs, ref) + return nil + }) + if err != nil { + return nil, err + } + return resultRefs, nil +} + +func objectsToPush(commands []*packp.Command) []plumbing.Hash { + objects := make([]plumbing.Hash, 0, len(commands)) + for _, cmd := range commands { + if cmd.New == plumbing.ZeroHash { + continue + } + objects = append(objects, cmd.New) + } + return objects +} + +func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) { + iter, err := refs.IterReferences() + if err != nil { + return nil, err + } + + var hs []plumbing.Hash + err = iter.ForEach(func(ref *plumbing.Reference) error { + if ref.Type() != plumbing.HashReference { + return nil + } + + hs = append(hs, ref.Hash()) + return nil + }) + if err != nil { + return nil, err + } + + return hs, nil +} + +func pushHashes( + ctx context.Context, + sess transport.ReceivePackSession, + s storage.Storer, + req *packp.ReferenceUpdateRequest, + hs []plumbing.Hash, + useRefDeltas bool, + allDelete bool, +) (*packp.ReportStatus, error) { + + rd, wr := io.Pipe() + + config, err := s.Config() + if err != nil { + return nil, err + } + + // Set buffer size to 1 so the error message can be written when + // ReceivePack fails. Otherwise the goroutine will be blocked writing + // to the channel. 
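+	// The packfile is streamed: the encoder goroutine writes into wr while
+	// ReceivePack reads from rd, so neither side buffers the whole pack.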
+ done := make(chan error, 1) + + if !allDelete { + req.Packfile = rd + go func() { + e := packfile.NewEncoder(wr, s, useRefDeltas) + if _, err := e.Encode(hs, config.Pack.Window); err != nil { + done <- wr.CloseWithError(err) + return + } + + done <- wr.Close() + }() + } else { + close(done) + } + + rs, err := sess.ReceivePack(ctx, req) + if err != nil { + // close the pipe to unlock encode write + _ = rd.Close() + return nil, err + } + + if err := <-done; err != nil { + return nil, err + } + + return rs, nil +} + +func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error { + if o.Depth == 0 || len(resp.Shallows) == 0 { + return nil + } + + shallows, err := r.s.Shallow() + if err != nil { + return err + } + +outer: + for _, s := range resp.Shallows { + for _, oldS := range shallows { + if s == oldS { + continue outer + } + } + shallows = append(shallows, s) + } + + return r.s.SetShallow(shallows) +} diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go new file mode 100644 index 0000000000..36fc1f5bd3 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -0,0 +1,1737 @@ +package git + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "io" + stdioutil "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" + + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/internal/revision" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/storage" + "github.com/go-git/go-git/v5/storage/filesystem" + "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/imdario/mergo" + "golang.org/x/crypto/openpgp" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/osfs" +) + +// GitDirName this is a special folder where all the git stuff is. 
+const GitDirName = ".git" + +var ( + // ErrBranchExists an error stating the specified branch already exists + ErrBranchExists = errors.New("branch already exists") + // ErrBranchNotFound an error stating the specified branch does not exist + ErrBranchNotFound = errors.New("branch not found") + // ErrTagExists an error stating the specified tag already exists + ErrTagExists = errors.New("tag already exists") + // ErrTagNotFound an error stating the specified tag does not exist + ErrTagNotFound = errors.New("tag not found") + // ErrFetching is returned when the packfile could not be downloaded + ErrFetching = errors.New("unable to fetch packfile") + + ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") + ErrRepositoryNotExists = errors.New("repository does not exist") + ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") + ErrRepositoryAlreadyExists = errors.New("repository already exists") + ErrRemoteNotFound = errors.New("remote not found") + ErrRemoteExists = errors.New("remote already exists") + ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") + ErrWorktreeNotProvided = errors.New("worktree should be provided") + ErrIsBareRepository = errors.New("worktree not available in a bare repository") + ErrUnableToResolveCommit = errors.New("unable to resolve commit") + ErrPackedObjectsNotSupported = errors.New("Packed objects not supported") +) + +// Repository represents a git repository +type Repository struct { + Storer storage.Storer + + r map[string]*Remote + wt billy.Filesystem +} + +// Init creates an empty git repository, based on the given Storer and worktree. +// The worktree Filesystem is optional, if nil a bare repository is created. If +// the given storer is not empty ErrRepositoryAlreadyExists is returned +func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { + if err := initStorer(s); err != nil { + return nil, err + } + + r := newRepository(s, worktree) + _, err := r.Reference(plumbing.HEAD, false) + switch err { + case plumbing.ErrReferenceNotFound: + case nil: + return nil, ErrRepositoryAlreadyExists + default: + return nil, err + } + + h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master) + if err := s.SetReference(h); err != nil { + return nil, err + } + + if worktree == nil { + _ = r.setIsBare(true) + return r, nil + } + + return r, setWorktreeAndStoragePaths(r, worktree) +} + +func initStorer(s storer.Storer) error { + i, ok := s.(storer.Initializer) + if !ok { + return nil + } + + return i.Init() +} + +func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error { + type fsBased interface { + Filesystem() billy.Filesystem + } + + // .git file is only created if the storage is file based and the file + // system is osfs.OS + fs, isFSBased := r.Storer.(fsBased) + if !isFSBased { + return nil + } + + if err := createDotGitFile(worktree, fs.Filesystem()); err != nil { + return err + } + + return setConfigWorktree(r, worktree, fs.Filesystem()) +} + +func createDotGitFile(worktree, storage billy.Filesystem) error { + path, err := filepath.Rel(worktree.Root(), storage.Root()) + if err != nil { + path = storage.Root() + } + + if path == GitDirName { + // not needed, since the folder is the default place + return nil + } + + f, err := worktree.Create(GitDirName) + if err != nil { + return err + } + + defer f.Close() + _, err = fmt.Fprintf(f, "gitdir: %s\n", path) + return err +} + +func setConfigWorktree(r *Repository, worktree, 
storage billy.Filesystem) error { + path, err := filepath.Rel(storage.Root(), worktree.Root()) + if err != nil { + path = worktree.Root() + } + + if path == ".." { + // not needed, since the folder is the default place + return nil + } + + cfg, err := r.Config() + if err != nil { + return err + } + + cfg.Core.Worktree = path + return r.Storer.SetConfig(cfg) +} + +// Open opens a git repository using the given Storer and worktree filesystem, +// if the given storer is complete empty ErrRepositoryNotExists is returned. +// The worktree can be nil when the repository being opened is bare, if the +// repository is a normal one (not bare) and worktree is nil the err +// ErrWorktreeNotProvided is returned +func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { + _, err := s.Reference(plumbing.HEAD) + if err == plumbing.ErrReferenceNotFound { + return nil, ErrRepositoryNotExists + } + + if err != nil { + return nil, err + } + + return newRepository(s, worktree), nil +} + +// Clone a repository into the given Storer and worktree Filesystem with the +// given options, if worktree is nil a bare repository is created. If the given +// storer is not empty ErrRepositoryAlreadyExists is returned. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { + return CloneContext(context.Background(), s, worktree, o) +} + +// CloneContext a repository into the given Storer and worktree Filesystem with +// the given options, if worktree is nil a bare repository is created. If the +// given storer is not empty ErrRepositoryAlreadyExists is returned. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func CloneContext( + ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, +) (*Repository, error) { + r, err := Init(s, worktree) + if err != nil { + return nil, err + } + + return r, r.clone(ctx, o) +} + +// PlainInit create an empty git repository at the given path. isBare defines +// if the repository will have worktree (non-bare) or not (bare), if the path +// is not empty ErrRepositoryAlreadyExists is returned. +func PlainInit(path string, isBare bool) (*Repository, error) { + var wt, dot billy.Filesystem + + if isBare { + dot = osfs.New(path) + } else { + wt = osfs.New(path) + dot, _ = wt.Chroot(GitDirName) + } + + s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) + + return Init(s, wt) +} + +// PlainOpen opens a git repository from the given path. It detects if the +// repository is bare or a normal one. If the path doesn't contain a valid +// repository ErrRepositoryNotExists is returned +func PlainOpen(path string) (*Repository, error) { + return PlainOpenWithOptions(path, &PlainOpenOptions{}) +} + +// PlainOpenWithOptions opens a git repository from the given path with specific +// options. See PlainOpen for more info. 
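+// A short usage sketch (the path is hypothetical); DetectDotGit walks up the
+// directory tree until a .git directory or file is found:
+//
+//	r, err := PlainOpenWithOptions("/path/to/repo/subdir", &PlainOpenOptions{
+//		DetectDotGit: true,
+//	})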
+func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) { + dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit) + if err != nil { + return nil, err + } + + if _, err := dot.Stat(""); err != nil { + if os.IsNotExist(err) { + return nil, ErrRepositoryNotExists + } + + return nil, err + } + + var repositoryFs billy.Filesystem + + if o.EnableDotGitCommonDir { + dotGitCommon, err := dotGitCommonDirectory(dot) + if err != nil { + return nil, err + } + repositoryFs = dotgit.NewRepositoryFilesystem(dot, dotGitCommon) + } else { + repositoryFs = dot + } + + s := filesystem.NewStorage(repositoryFs, cache.NewObjectLRUDefault()) + + return Open(s, wt) +} + +func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) { + if path, err = filepath.Abs(path); err != nil { + return nil, nil, err + } + + pathinfo, err := os.Stat(path) + if !os.IsNotExist(err) { + if !pathinfo.IsDir() && detect { + path = filepath.Dir(path) + } + } + + var fs billy.Filesystem + var fi os.FileInfo + for { + fs = osfs.New(path) + fi, err = fs.Stat(GitDirName) + if err == nil { + // no error; stop + break + } + if !os.IsNotExist(err) { + // unknown error; stop + return nil, nil, err + } + if detect { + // try its parent as long as we haven't reached + // the root dir + if dir := filepath.Dir(path); dir != path { + path = dir + continue + } + } + // not detecting via parent dirs and the dir does not exist; + // stop + return fs, nil, nil + } + + if fi.IsDir() { + dot, err = fs.Chroot(GitDirName) + return dot, fs, err + } + + dot, err = dotGitFileToOSFilesystem(path, fs) + if err != nil { + return nil, nil, err + } + + return dot, fs, nil +} + +func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) { + f, err := fs.Open(GitDirName) + if err != nil { + return nil, err + } + defer ioutil.CheckClose(f, &err) + + b, err := stdioutil.ReadAll(f) + if err != nil { + return nil, err + } + + line := string(b) + const prefix = "gitdir: " + if !strings.HasPrefix(line, prefix) { + return nil, fmt.Errorf(".git file has no %s prefix", prefix) + } + + gitdir := strings.Split(line[len(prefix):], "\n")[0] + gitdir = strings.TrimSpace(gitdir) + if filepath.IsAbs(gitdir) { + return osfs.New(gitdir), nil + } + + return osfs.New(fs.Join(path, gitdir)), nil +} + +func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err error) { + f, err := fs.Open("commondir") + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + + b, err := stdioutil.ReadAll(f) + if err != nil { + return nil, err + } + if len(b) > 0 { + path := strings.TrimSpace(string(b)) + if filepath.IsAbs(path) { + commonDir = osfs.New(path) + } else { + commonDir = osfs.New(filepath.Join(fs.Root(), path)) + } + if _, err := commonDir.Stat(""); err != nil { + if os.IsNotExist(err) { + return nil, ErrRepositoryIncomplete + } + + return nil, err + } + } + + return commonDir, nil +} + +// PlainClone a repository into the path with the given options, isBare defines +// if the new repository will be bare or normal. If the path is not empty +// ErrRepositoryAlreadyExists is returned. +// +// TODO(mcuadros): move isBare to CloneOptions in v5 +func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) { + return PlainCloneContext(context.Background(), path, isBare, o) +} + +// PlainCloneContext a repository into the path with the given options, isBare +// defines if the new repository will be bare or normal. 
If the path is not empty
+// ErrRepositoryAlreadyExists is returned.
+//
+// The provided Context must be non-nil. If the context expires before the
+// operation is complete, an error is returned. The context only affects the
+// transport operations.
+//
+// TODO(mcuadros): move isBare to CloneOptions in v5
+// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027
+func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) {
+	cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := PlainInit(path, isBare)
+	if err != nil {
+		return nil, err
+	}
+
+	err = r.clone(ctx, o)
+	if err != nil && err != ErrRepositoryAlreadyExists {
+		if cleanup {
+			_ = cleanUpDir(path, cleanupParent)
+		}
+	}
+
+	return r, err
+}
+
+func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository {
+	return &Repository{
+		Storer: s,
+		wt:     worktree,
+		r:      make(map[string]*Remote),
+	}
+}
+
+func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return true, true, nil
+		}
+
+		return false, false, err
+	}
+
+	if !fi.IsDir() {
+		return false, false, fmt.Errorf("path is not a directory: %s", path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return false, false, err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	_, err = f.Readdirnames(1)
+	if err == io.EOF {
+		return true, false, nil
+	}
+
+	if err != nil {
+		return false, false, err
+	}
+
+	return false, false, nil
+}
+
+func cleanUpDir(path string, all bool) error {
+	if all {
+		return os.RemoveAll(path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	names, err := f.Readdirnames(-1)
+	if err != nil {
+		return err
+	}
+
+	for _, name := range names {
+		if err := os.RemoveAll(filepath.Join(path, name)); err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// Config returns the repository config. In a filesystem backed repository this
+// means reading the `.git/config` file.
+func (r *Repository) Config() (*config.Config, error) {
+	return r.Storer.Config()
+}
+
+// SetConfig marshalls and writes the repository config. In a filesystem backed
+// repository this means writing the `.git/config` file. This function should be
+// called with the result of `Repository.Config` and never with the output of
+// `Repository.ConfigScoped`.
+func (r *Repository) SetConfig(cfg *config.Config) error {
+	return r.Storer.SetConfig(cfg)
+}
+
+// ConfigScoped returns the repository config, merged with the requested scope
+// and lower. For example, if config.GlobalScope is given, the local and global
+// config are returned merged into one config value.
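+// For example (a sketch; error handling elided):
+//
+//	cfg, _ := r.ConfigScoped(config.GlobalScope)
+//	fmt.Println(cfg.User.Name) // user.name from local or global config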
+func (r *Repository) ConfigScoped(scope config.Scope) (*config.Config, error) {
+	// TODO(mcuadros): v6, add this as ConfigOptions.Scoped
+
+	var err error
+	system := config.NewConfig()
+	if scope >= config.SystemScope {
+		system, err = config.LoadConfig(config.SystemScope)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	global := config.NewConfig()
+	if scope >= config.GlobalScope {
+		global, err = config.LoadConfig(config.GlobalScope)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	local, err := r.Storer.Config()
+	if err != nil {
+		return nil, err
+	}
+
+	_ = mergo.Merge(global, system)
+	_ = mergo.Merge(local, global)
+	return local, nil
+}
+
+// Remote returns a remote if it exists.
+func (r *Repository) Remote(name string) (*Remote, error) {
+	cfg, err := r.Config()
+	if err != nil {
+		return nil, err
+	}
+
+	c, ok := cfg.Remotes[name]
+	if !ok {
+		return nil, ErrRemoteNotFound
+	}
+
+	return NewRemote(r.Storer, c), nil
+}
+
+// Remotes returns a list with all the remotes.
+func (r *Repository) Remotes() ([]*Remote, error) {
+	cfg, err := r.Config()
+	if err != nil {
+		return nil, err
+	}
+
+	remotes := make([]*Remote, len(cfg.Remotes))
+
+	var i int
+	for _, c := range cfg.Remotes {
+		remotes[i] = NewRemote(r.Storer, c)
+		i++
+	}
+
+	return remotes, nil
+}
+
+// CreateRemote creates a new remote.
+func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) {
+	if err := c.Validate(); err != nil {
+		return nil, err
+	}
+
+	remote := NewRemote(r.Storer, c)
+
+	cfg, err := r.Config()
+	if err != nil {
+		return nil, err
+	}
+
+	if _, ok := cfg.Remotes[c.Name]; ok {
+		return nil, ErrRemoteExists
+	}
+
+	cfg.Remotes[c.Name] = c
+	return remote, r.Storer.SetConfig(cfg)
+}
+
+// CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous".
+// It's used like 'git fetch git@github.com:src-d/go-git.git master:master'.
+func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) {
+	if err := c.Validate(); err != nil {
+		return nil, err
+	}
+
+	if c.Name != "anonymous" {
+		return nil, ErrAnonymousRemoteName
+	}
+
+	remote := NewRemote(r.Storer, c)
+
+	return remote, nil
+}
+
+// DeleteRemote deletes a remote from the repository and removes it from the config.
+func (r *Repository) DeleteRemote(name string) error {
+	cfg, err := r.Config()
+	if err != nil {
+		return err
+	}
+
+	if _, ok := cfg.Remotes[name]; !ok {
+		return ErrRemoteNotFound
+	}
+
+	delete(cfg.Remotes, name)
+	return r.Storer.SetConfig(cfg)
+}
+
+// Branch returns a Branch if it exists.
+func (r *Repository) Branch(name string) (*config.Branch, error) {
+	cfg, err := r.Config()
+	if err != nil {
+		return nil, err
+	}
+
+	b, ok := cfg.Branches[name]
+	if !ok {
+		return nil, ErrBranchNotFound
+	}
+
+	return b, nil
+}
+
+// CreateBranch creates a new Branch.
+func (r *Repository) CreateBranch(c *config.Branch) error {
+	if err := c.Validate(); err != nil {
+		return err
+	}
+
+	cfg, err := r.Config()
+	if err != nil {
+		return err
+	}
+
+	if _, ok := cfg.Branches[c.Name]; ok {
+		return ErrBranchExists
+	}
+
+	cfg.Branches[c.Name] = c
+	return r.Storer.SetConfig(cfg)
+}
+
+// DeleteBranch deletes a Branch from the repository and removes it from the config.
+func (r *Repository) DeleteBranch(name string) error {
+	cfg, err := r.Config()
+	if err != nil {
+		return err
+	}
+
+	if _, ok := cfg.Branches[name]; !ok {
+		return ErrBranchNotFound
+	}
+
+	delete(cfg.Branches, name)
+	return r.Storer.SetConfig(cfg)
+}
+
+// CreateTag creates a tag.
If opts is included, the tag is an annotated tag, +// otherwise a lightweight tag is created. +func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) { + rname := plumbing.ReferenceName(path.Join("refs", "tags", name)) + + _, err := r.Storer.Reference(rname) + switch err { + case nil: + // Tag exists, this is an error + return nil, ErrTagExists + case plumbing.ErrReferenceNotFound: + // Tag missing, available for creation, pass this + default: + // Some other error + return nil, err + } + + var target plumbing.Hash + if opts != nil { + target, err = r.createTagObject(name, hash, opts) + if err != nil { + return nil, err + } + } else { + target = hash + } + + ref := plumbing.NewHashReference(rname, target) + if err = r.Storer.SetReference(ref); err != nil { + return nil, err + } + + return ref, nil +} + +func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) { + if err := opts.Validate(r, hash); err != nil { + return plumbing.ZeroHash, err + } + + rawobj, err := object.GetObject(r.Storer, hash) + if err != nil { + return plumbing.ZeroHash, err + } + + tag := &object.Tag{ + Name: name, + Tagger: *opts.Tagger, + Message: opts.Message, + TargetType: rawobj.Type(), + Target: hash, + } + + if opts.SignKey != nil { + sig, err := r.buildTagSignature(tag, opts.SignKey) + if err != nil { + return plumbing.ZeroHash, err + } + + tag.PGPSignature = sig + } + + obj := r.Storer.NewEncodedObject() + if err := tag.Encode(obj); err != nil { + return plumbing.ZeroHash, err + } + + return r.Storer.SetEncodedObject(obj) +} + +func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) { + encoded := &plumbing.MemoryObject{} + if err := tag.Encode(encoded); err != nil { + return "", err + } + + rdr, err := encoded.Reader() + if err != nil { + return "", err + } + + var b bytes.Buffer + if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil { + return "", err + } + + return b.String(), nil +} + +// Tag returns a tag from the repository. +// +// If you want to check to see if the tag is an annotated tag, you can call +// TagObject on the hash of the reference in ForEach: +// +// ref, err := r.Tag("v0.1.0") +// if err != nil { +// // Handle error +// } +// +// obj, err := r.TagObject(ref.Hash()) +// switch err { +// case nil: +// // Tag object present +// case plumbing.ErrObjectNotFound: +// // Not a tag object +// default: +// // Some other error +// } +// +func (r *Repository) Tag(name string) (*plumbing.Reference, error) { + ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false) + if err != nil { + if err == plumbing.ErrReferenceNotFound { + // Return a friendly error for this one, versus just ReferenceNotFound. + return nil, ErrTagNotFound + } + + return nil, err + } + + return ref, nil +} + +// DeleteTag deletes a tag from the repository. 
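+// A minimal sketch (the tag name is hypothetical):
+//
+//	if err := r.DeleteTag("v0.1.0"); err == ErrTagNotFound {
+//		// the tag did not exist
+//	}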
+func (r *Repository) DeleteTag(name string) error { + _, err := r.Tag(name) + if err != nil { + return err + } + + return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name))) +} + +func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) { + obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h) + if err != nil { + return plumbing.ZeroHash, err + } + switch obj.Type() { + case plumbing.TagObject: + t, err := object.DecodeTag(r.Storer, obj) + if err != nil { + return plumbing.ZeroHash, err + } + return r.resolveToCommitHash(t.Target) + case plumbing.CommitObject: + return h, nil + default: + return plumbing.ZeroHash, ErrUnableToResolveCommit + } +} + +// Clone clones a remote repository +func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { + if err := o.Validate(); err != nil { + return err + } + + c := &config.RemoteConfig{ + Name: o.RemoteName, + URLs: []string{o.URL}, + Fetch: r.cloneRefSpec(o), + } + + if _, err := r.CreateRemote(c); err != nil { + return err + } + + ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ + RefSpecs: c.Fetch, + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Tags: o.Tags, + RemoteName: o.RemoteName, + }, o.ReferenceName) + if err != nil { + return err + } + + if r.wt != nil && !o.NoCheckout { + w, err := r.Worktree() + if err != nil { + return err + } + + head, err := r.Head() + if err != nil { + return err + } + + if err := w.Reset(&ResetOptions{ + Mode: MergeReset, + Commit: head.Hash(), + }); err != nil { + return err + } + + if o.RecurseSubmodules != NoRecurseSubmodules { + if err := w.updateSubmodules(&SubmoduleUpdateOptions{ + RecurseSubmodules: o.RecurseSubmodules, + Auth: o.Auth, + }); err != nil { + return err + } + } + } + + if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil { + return err + } + + if ref.Name().IsBranch() { + branchRef := ref.Name() + branchName := strings.Split(string(branchRef), "refs/heads/")[1] + + b := &config.Branch{ + Name: branchName, + Merge: branchRef, + } + if o.RemoteName == "" { + b.Remote = "origin" + } else { + b.Remote = o.RemoteName + } + if err := r.CreateBranch(b); err != nil { + return err + } + } + + return nil +} + +const ( + refspecTag = "+refs/tags/%s:refs/tags/%[1]s" + refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s" + refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD" +) + +func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec { + switch { + case o.ReferenceName.IsTag(): + return []config.RefSpec{ + config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())), + } + case o.SingleBranch && o.ReferenceName == plumbing.HEAD: + return []config.RefSpec{ + config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)), + config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)), + } + case o.SingleBranch: + return []config.RefSpec{ + config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)), + } + default: + return []config.RefSpec{ + config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)), + } + } +} + +func (r *Repository) setIsBare(isBare bool) error { + cfg, err := r.Config() + if err != nil { + return err + } + + cfg.Core.IsBare = isBare + return r.Storer.SetConfig(cfg) +} + +func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error { + if !o.SingleBranch { + return nil + } + + c.Fetch = r.cloneRefSpec(o) + + cfg, err := r.Config() 
+ if err != nil { + return err + } + + cfg.Remotes[c.Name] = c + return r.Storer.SetConfig(cfg) +} + +func (r *Repository) fetchAndUpdateReferences( + ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName, +) (*plumbing.Reference, error) { + + if err := o.Validate(); err != nil { + return nil, err + } + + remote, err := r.Remote(o.RemoteName) + if err != nil { + return nil, err + } + + objsUpdated := true + remoteRefs, err := remote.fetch(ctx, o) + if err == NoErrAlreadyUpToDate { + objsUpdated = false + } else if err == packfile.ErrEmptyPackfile { + return nil, ErrFetching + } else if err != nil { + return nil, err + } + + resolvedRef, err := storer.ResolveReference(remoteRefs, ref) + if err != nil { + return nil, err + } + + refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef) + if err != nil { + return nil, err + } + + if !objsUpdated && !refsUpdated { + return nil, NoErrAlreadyUpToDate + } + + return resolvedRef, nil +} + +func (r *Repository) updateReferences(spec []config.RefSpec, + resolvedRef *plumbing.Reference) (updated bool, err error) { + + if !resolvedRef.Name().IsBranch() { + // Detached HEAD mode + h, err := r.resolveToCommitHash(resolvedRef.Hash()) + if err != nil { + return false, err + } + head := plumbing.NewHashReference(plumbing.HEAD, h) + return updateReferenceStorerIfNeeded(r.Storer, head) + } + + refs := []*plumbing.Reference{ + // Create local reference for the resolved ref + resolvedRef, + // Create local symbolic HEAD + plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()), + } + + refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...) + + for _, ref := range refs { + u, err := updateReferenceStorerIfNeeded(r.Storer, ref) + if err != nil { + return updated, err + } + + if u { + updated = true + } + } + + return +} + +func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec, + resolvedHead *plumbing.Reference) []*plumbing.Reference { + + var refs []*plumbing.Reference + + // Create resolved HEAD reference with remote prefix if it does not + // exist. This is needed when using single branch and HEAD. + for _, rs := range spec { + name := resolvedHead.Name() + if !rs.Match(name) { + continue + } + + name = rs.Dst(name) + _, err := r.Storer.Reference(name) + if err == plumbing.ErrReferenceNotFound { + refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash())) + } + } + + return refs +} + +func checkAndUpdateReferenceStorerIfNeeded( + s storer.ReferenceStorer, r, old *plumbing.Reference) ( + updated bool, err error) { + p, err := s.Reference(r.Name()) + if err != nil && err != plumbing.ErrReferenceNotFound { + return false, err + } + + // we use the string method to compare references, is the easiest way + if err == plumbing.ErrReferenceNotFound || r.String() != p.String() { + if err := s.CheckAndSetReference(r, old); err != nil { + return false, err + } + + return true, nil + } + + return false, nil +} + +func updateReferenceStorerIfNeeded( + s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) { + return checkAndUpdateReferenceStorerIfNeeded(s, r, nil) +} + +// Fetch fetches references along with the objects necessary to complete +// their histories, from the remote named as FetchOptions.RemoteName. +// +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. 
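+// A hedged usage sketch (the remote name shown is the conventional default):
+//
+//	err := r.Fetch(&FetchOptions{RemoteName: "origin"})
+//	switch err {
+//	case nil, NoErrAlreadyUpToDate:
+//		// success either way
+//	default:
+//		// handle the error
+//	}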
+func (r *Repository) Fetch(o *FetchOptions) error { + return r.FetchContext(context.Background(), o) +} + +// FetchContext fetches references along with the objects necessary to complete +// their histories, from the remote named as FetchOptions.RemoteName. +// +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { + if err := o.Validate(); err != nil { + return err + } + + remote, err := r.Remote(o.RemoteName) + if err != nil { + return err + } + + return remote.FetchContext(ctx, o) +} + +// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if +// the remote was already up-to-date, from the remote named as +// FetchOptions.RemoteName. +func (r *Repository) Push(o *PushOptions) error { + return r.PushContext(context.Background(), o) +} + +// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if +// the remote was already up-to-date, from the remote named as +// FetchOptions.RemoteName. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { + if err := o.Validate(); err != nil { + return err + } + + remote, err := r.Remote(o.RemoteName) + if err != nil { + return err + } + + return remote.PushContext(ctx, o) +} + +// Log returns the commit history from the given LogOptions. +func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { + fn := commitIterFunc(o.Order) + if fn == nil { + return nil, fmt.Errorf("invalid Order=%v", o.Order) + } + + var ( + it object.CommitIter + err error + ) + if o.All { + it, err = r.logAll(fn) + } else { + it, err = r.log(o.From, fn) + } + + if err != nil { + return nil, err + } + + if o.FileName != nil { + // for `git log --all` also check parent (if the next commit comes from the real parent) + it = r.logWithFile(*o.FileName, it, o.All) + } + if o.PathFilter != nil { + it = r.logWithPathFilter(o.PathFilter, it, o.All) + } + + if o.Since != nil || o.Until != nil { + limitOptions := object.LogLimitOptions{Since: o.Since, Until: o.Until} + it = r.logWithLimit(it, limitOptions) + } + + return it, nil +} + +func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { + h := from + if from == plumbing.ZeroHash { + head, err := r.Head() + if err != nil { + return nil, err + } + + h = head.Hash() + } + + commit, err := r.CommitObject(h) + if err != nil { + return nil, err + } + return commitIterFunc(commit), nil +} + +func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { + return object.NewCommitAllIter(r.Storer, commitIterFunc) +} + +func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter { + return object.NewCommitPathIterFromIter( + func(path string) bool { + return path == fileName + }, + commitIter, + checkParent, + ) +} + +func (*Repository) logWithPathFilter(pathFilter func(string) bool, commitIter object.CommitIter, checkParent bool) object.CommitIter { + return 
object.NewCommitPathIterFromIter( + pathFilter, + commitIter, + checkParent, + ) +} + +func (*Repository) logWithLimit(commitIter object.CommitIter, limitOptions object.LogLimitOptions) object.CommitIter { + return object.NewCommitLimitIterFromIter(commitIter, limitOptions) +} + +func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter { + switch order { + case LogOrderDefault: + return func(c *object.Commit) object.CommitIter { + return object.NewCommitPreorderIter(c, nil, nil) + } + case LogOrderDFS: + return func(c *object.Commit) object.CommitIter { + return object.NewCommitPreorderIter(c, nil, nil) + } + case LogOrderDFSPost: + return func(c *object.Commit) object.CommitIter { + return object.NewCommitPostorderIter(c, nil) + } + case LogOrderBSF: + return func(c *object.Commit) object.CommitIter { + return object.NewCommitIterBSF(c, nil, nil) + } + case LogOrderCommitterTime: + return func(c *object.Commit) object.CommitIter { + return object.NewCommitIterCTime(c, nil, nil) + } + } + return nil +} + +// Tags returns all the tag References in a repository. +// +// If you want to check to see if the tag is an annotated tag, you can call +// TagObject on the hash Reference passed in through ForEach: +// +// iter, err := r.Tags() +// if err != nil { +// // Handle error +// } +// +// if err := iter.ForEach(func (ref *plumbing.Reference) error { +// obj, err := r.TagObject(ref.Hash()) +// switch err { +// case nil: +// // Tag object present +// case plumbing.ErrObjectNotFound: +// // Not a tag object +// default: +// // Some other error +// return err +// } +// }); err != nil { +// // Handle outer iterator error +// } +// +func (r *Repository) Tags() (storer.ReferenceIter, error) { + refIter, err := r.Storer.IterReferences() + if err != nil { + return nil, err + } + + return storer.NewReferenceFilteredIter( + func(r *plumbing.Reference) bool { + return r.Name().IsTag() + }, refIter), nil +} + +// Branches returns all the References that are Branches. +func (r *Repository) Branches() (storer.ReferenceIter, error) { + refIter, err := r.Storer.IterReferences() + if err != nil { + return nil, err + } + + return storer.NewReferenceFilteredIter( + func(r *plumbing.Reference) bool { + return r.Name().IsBranch() + }, refIter), nil +} + +// Notes returns all the References that are notes. For more information: +// https://git-scm.com/docs/git-notes +func (r *Repository) Notes() (storer.ReferenceIter, error) { + refIter, err := r.Storer.IterReferences() + if err != nil { + return nil, err + } + + return storer.NewReferenceFilteredIter( + func(r *plumbing.Reference) bool { + return r.Name().IsNote() + }, refIter), nil +} + +// TreeObject return a Tree with the given hash. If not found +// plumbing.ErrObjectNotFound is returned +func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) { + return object.GetTree(r.Storer, h) +} + +// TreeObjects returns an unsorted TreeIter with all the trees in the repository +func (r *Repository) TreeObjects() (*object.TreeIter, error) { + iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject) + if err != nil { + return nil, err + } + + return object.NewTreeIter(r.Storer, iter), nil +} + +// CommitObject return a Commit with the given hash. If not found +// plumbing.ErrObjectNotFound is returned. +func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) { + return object.GetCommit(r.Storer, h) +} + +// CommitObjects returns an unsorted CommitIter with all the commits in the repository. 
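+// A small sketch of walking every commit (iteration is in storage order, not
+// history order):
+//
+//	iter, err := r.CommitObjects()
+//	if err == nil {
+//		_ = iter.ForEach(func(c *object.Commit) error {
+//			fmt.Println(c.Hash, c.Author.Name)
+//			return nil
+//		})
+//	}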
+func (r *Repository) CommitObjects() (object.CommitIter, error) {
+	iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject)
+	if err != nil {
+		return nil, err
+	}
+
+	return object.NewCommitIter(r.Storer, iter), nil
+}
+
+// BlobObject returns a Blob with the given hash. If not found
+// plumbing.ErrObjectNotFound is returned.
+func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) {
+	return object.GetBlob(r.Storer, h)
+}
+
+// BlobObjects returns an unsorted BlobIter with all the blobs in the repository.
+func (r *Repository) BlobObjects() (*object.BlobIter, error) {
+	iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject)
+	if err != nil {
+		return nil, err
+	}
+
+	return object.NewBlobIter(r.Storer, iter), nil
+}
+
+// TagObject returns a Tag with the given hash. If not found
+// plumbing.ErrObjectNotFound is returned. This method only returns
+// annotated Tags, no lightweight Tags.
+func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) {
+	return object.GetTag(r.Storer, h)
+}
+
+// TagObjects returns an unsorted TagIter that can step through all of the annotated
+// tags in the repository.
+func (r *Repository) TagObjects() (*object.TagIter, error) {
+	iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject)
+	if err != nil {
+		return nil, err
+	}
+
+	return object.NewTagIter(r.Storer, iter), nil
+}
+
+// Object returns an Object with the given hash. If not found
+// plumbing.ErrObjectNotFound is returned.
+func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) {
+	obj, err := r.Storer.EncodedObject(t, h)
+	if err != nil {
+		return nil, err
+	}
+
+	return object.DecodeObject(r.Storer, obj)
+}
+
+// Objects returns an unsorted ObjectIter with all the objects in the repository.
+func (r *Repository) Objects() (*object.ObjectIter, error) {
+	iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject)
+	if err != nil {
+		return nil, err
+	}
+
+	return object.NewObjectIter(r.Storer, iter), nil
+}
+
+// Head returns the reference where HEAD is pointing to.
+func (r *Repository) Head() (*plumbing.Reference, error) {
+	return storer.ResolveReference(r.Storer, plumbing.HEAD)
+}
+
+// Reference returns the reference for a given reference name. If resolved is
+// true, any symbolic reference will be resolved.
+func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) (
+	*plumbing.Reference, error) {
+
+	if resolved {
+		return storer.ResolveReference(r.Storer, name)
+	}
+
+	return r.Storer.Reference(name)
+}
+
+// References returns an unsorted ReferenceIter for all references.
+func (r *Repository) References() (storer.ReferenceIter, error) {
+	return r.Storer.IterReferences()
+}
+
+// Worktree returns the worktree of the repository. ErrIsBareRepository is
+// returned when the repository has no worktree (it is bare).
+func (r *Repository) Worktree() (*Worktree, error) {
+	if r.wt == nil {
+		return nil, ErrIsBareRepository
+	}
+
+	return &Worktree{r: r, Filesystem: r.wt}, nil
+}
+
+// ResolveRevision resolves revision to corresponding hash. It will always
+// resolve to a commit hash, not a tree or annotated tag.
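+//
+// For example (a sketch; the revision string is arbitrary):
+//
+//	h, err := r.ResolveRevision(plumbing.Revision("HEAD~1"))
+//	if err == nil {
+//		fmt.Println(h) // hash of the first parent of HEAD
+//	}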
+// +// Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch, +// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}), hash (prefix and full) +func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) { + p := revision.NewParserFromString(string(rev)) + + items, err := p.Parse() + + if err != nil { + return nil, err + } + + var commit *object.Commit + + for _, item := range items { + switch item := item.(type) { + case revision.Ref: + revisionRef := item + + var tryHashes []plumbing.Hash + + tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...) + + for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) { + ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef))) + + if err == nil { + tryHashes = append(tryHashes, ref.Hash()) + break + } + } + + // in ambiguous cases, `git rev-parse` will emit a warning, but + // will always return the oid in preference to a ref; we don't have + // the ability to emit a warning here, so (for speed purposes) + // don't bother to detect the ambiguity either, just return in the + // priority that git would. + gotOne := false + for _, hash := range tryHashes { + commitObj, err := r.CommitObject(hash) + if err == nil { + commit = commitObj + gotOne = true + break + } + + tagObj, err := r.TagObject(hash) + if err == nil { + // If the tag target lookup fails here, this most likely + // represents some sort of repo corruption, so let the + // error bubble up. + tagCommit, err := tagObj.Commit() + if err != nil { + return &plumbing.ZeroHash, err + } + commit = tagCommit + gotOne = true + break + } + } + + if !gotOne { + return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound + } + + case revision.CaretPath: + depth := item.Depth + + if depth == 0 { + break + } + + iter := commit.Parents() + + c, err := iter.Next() + + if err != nil { + return &plumbing.ZeroHash, err + } + + if depth == 1 { + commit = c + + break + } + + c, err = iter.Next() + + if err != nil { + return &plumbing.ZeroHash, err + } + + commit = c + case revision.TildePath: + for i := 0; i < item.Depth; i++ { + c, err := commit.Parents().Next() + + if err != nil { + return &plumbing.ZeroHash, err + } + + commit = c + } + case revision.CaretReg: + history := object.NewCommitPreorderIter(commit, nil, nil) + + re := item.Regexp + negate := item.Negate + + var c *object.Commit + + err := history.ForEach(func(hc *object.Commit) error { + if !negate && re.MatchString(hc.Message) { + c = hc + return storer.ErrStop + } + + if negate && !re.MatchString(hc.Message) { + c = hc + return storer.ErrStop + } + + return nil + }) + if err != nil { + return &plumbing.ZeroHash, err + } + + if c == nil { + return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String()) + } + + commit = c + } + } + + return &commit.Hash, nil +} + +// resolveHashPrefix returns a list of potential hashes that the given string +// is a prefix of. It quietly swallows errors, returning nil. +func (r *Repository) resolveHashPrefix(hashStr string) []plumbing.Hash { + // Handle complete and partial hashes. + // plumbing.NewHash forces args into a full 20 byte hash, which isn't suitable + // for partial hashes since they will become zero-filled. 
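+	// For example, plumbing.NewHash("ab12") would yield "ab12" followed by 36
+	// zero hex digits, which can never match a real object as a prefix.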
+ + if hashStr == "" { + return nil + } + if len(hashStr) == len(plumbing.ZeroHash)*2 { + // Only a full hash is possible. + hexb, err := hex.DecodeString(hashStr) + if err != nil { + return nil + } + var h plumbing.Hash + copy(h[:], hexb) + return []plumbing.Hash{h} + } + + // Partial hash. + // hex.DecodeString only decodes to complete bytes, so only works with pairs of hex digits. + evenHex := hashStr[:len(hashStr)&^1] + hexb, err := hex.DecodeString(evenHex) + if err != nil { + return nil + } + candidates := expandPartialHash(r.Storer, hexb) + if len(evenHex) == len(hashStr) { + // The prefix was an exact number of bytes. + return candidates + } + // Do another prefix check to ensure the dangling nybble is correct. + var hashes []plumbing.Hash + for _, h := range candidates { + if strings.HasPrefix(h.String(), hashStr) { + hashes = append(hashes, h) + } + } + return hashes +} + +type RepackConfig struct { + // UseRefDeltas configures whether packfile encoder will use reference deltas. + // By default OFSDeltaObject is used. + UseRefDeltas bool + // OnlyDeletePacksOlderThan if set to non-zero value + // selects only objects older than the time provided. + OnlyDeletePacksOlderThan time.Time +} + +func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { + pos, ok := r.Storer.(storer.PackedObjectStorer) + if !ok { + return ErrPackedObjectsNotSupported + } + + // Get the existing object packs. + hs, err := pos.ObjectPacks() + if err != nil { + return err + } + + // Create a new pack. + nh, err := r.createNewObjectPack(cfg) + if err != nil { + return err + } + + // Delete old packs. + for _, h := range hs { + // Skip if new hash is the same as an old one. + if h == nh { + continue + } + err = pos.DeleteOldObjectPackAndIndex(h, cfg.OnlyDeletePacksOlderThan) + if err != nil { + return err + } + } + + return nil +} + +// createNewObjectPack is a helper for RepackObjects taking care +// of creating a new pack. It is used so the the PackfileWriter +// deferred close has the right scope. +func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { + ow := newObjectWalker(r.Storer) + err = ow.walkAllRefs() + if err != nil { + return h, err + } + objs := make([]plumbing.Hash, 0, len(ow.seen)) + for h := range ow.seen { + objs = append(objs, h) + } + pfw, ok := r.Storer.(storer.PackfileWriter) + if !ok { + return h, fmt.Errorf("Repository storer is not a storer.PackfileWriter") + } + wc, err := pfw.PackfileWriter() + if err != nil { + return h, err + } + defer ioutil.CheckClose(wc, &err) + scfg, err := r.Config() + if err != nil { + return h, err + } + enc := packfile.NewEncoder(wc, r.Storer, cfg.UseRefDeltas) + h, err = enc.Encode(objs, scfg.Pack.Window) + if err != nil { + return h, err + } + + // Delete the packed, loose objects. + if los, ok := r.Storer.(storer.LooseObjectStorer); ok { + err = los.ForEachObjectHash(func(hash plumbing.Hash) error { + if ow.isSeen(hash) { + err = los.DeleteLooseObject(hash) + if err != nil { + return err + } + } + return nil + }) + if err != nil { + return h, err + } + } + + return h, err +} + +func expandPartialHash(st storer.EncodedObjectStorer, prefix []byte) (hashes []plumbing.Hash) { + // The fast version is implemented by storage/filesystem.ObjectStorage. + type fastIter interface { + HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) + } + if fi, ok := st.(fastIter); ok { + h, err := fi.HashesWithPrefix(prefix) + if err != nil { + return nil + } + return h + } + + // Slow path. 
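+	// This fallback scans every object in the storer; it works for any
+	// storer.EncodedObjectStorer, but its cost grows with repository size.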
+	iter, err := st.IterEncodedObjects(plumbing.AnyObject)
+	if err != nil {
+		return nil
+	}
+	iter.ForEach(func(obj plumbing.EncodedObject) error {
+		h := obj.Hash()
+		if bytes.HasPrefix(h[:], prefix) {
+			hashes = append(hashes, h)
+		}
+		return nil
+	})
+	return
+}
diff --git a/vendor/github.com/go-git/go-git/v5/status.go b/vendor/github.com/go-git/go-git/v5/status.go
new file mode 100644
index 0000000000..7f18e02278
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/status.go
@@ -0,0 +1,79 @@
+package git
+
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+)
+
+// Status represents the current status of a Worktree.
+// The key of the map is the path of the file.
+type Status map[string]*FileStatus
+
+// File returns the FileStatus for a given path. If the FileStatus doesn't
+// exist, a new FileStatus is added to the map using the path as key.
+func (s Status) File(path string) *FileStatus {
+	if _, ok := (s)[path]; !ok {
+		s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked}
+	}
+
+	return s[path]
+}
+
+// IsUntracked checks if the file for the given path is 'Untracked'.
+func (s Status) IsUntracked(path string) bool {
+	stat, ok := (s)[filepath.ToSlash(path)]
+	return ok && stat.Worktree == Untracked
+}
+
+// IsClean returns true if all the files are in Unmodified status.
+func (s Status) IsClean() bool {
+	for _, status := range s {
+		if status.Worktree != Unmodified || status.Staging != Unmodified {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (s Status) String() string {
+	buf := bytes.NewBuffer(nil)
+	for path, status := range s {
+		if status.Staging == Unmodified && status.Worktree == Unmodified {
+			continue
+		}
+
+		if status.Staging == Renamed {
+			path = fmt.Sprintf("%s -> %s", path, status.Extra)
+		}
+
+		fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path)
+	}
+
+	return buf.String()
+}
+
+// FileStatus contains the status of a file in the worktree.
+type FileStatus struct {
+	// Staging is the status of a file in the staging area.
+	Staging StatusCode
+	// Worktree is the status of a file in the worktree.
+	Worktree StatusCode
+	// Extra contains extra information, such as the previous name in a rename.
+	Extra string
+}
+
+// StatusCode is the status code of a file in the Worktree.
+type StatusCode byte
+
+const (
+	Unmodified         StatusCode = ' '
+	Untracked          StatusCode = '?'
+ Modified StatusCode = 'M' + Added StatusCode = 'A' + Deleted StatusCode = 'D' + Renamed StatusCode = 'R' + Copied StatusCode = 'C' + UpdatedButUnmerged StatusCode = 'U' +) diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go new file mode 100644 index 0000000000..78a646465a --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go @@ -0,0 +1,48 @@ +package filesystem + +import ( + "os" + + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +type ConfigStorage struct { + dir *dotgit.DotGit +} + +func (c *ConfigStorage) Config() (conf *config.Config, err error) { + f, err := c.dir.Config() + if err != nil { + if os.IsNotExist(err) { + return config.NewConfig(), nil + } + + return nil, err + } + + defer ioutil.CheckClose(f, &err) + return config.ReadConfig(f) +} + +func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) { + if err = cfg.Validate(); err != nil { + return err + } + + f, err := c.dir.ConfigWriter() + if err != nil { + return err + } + + defer ioutil.CheckClose(f, &err) + + b, err := cfg.Marshal() + if err != nil { + return err + } + + _, err = f.Write(b) + return err +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go new file mode 100644 index 0000000000..6ab2cdf38a --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go @@ -0,0 +1,37 @@ +package filesystem + +import ( + "github.com/go-git/go-git/v5/plumbing" +) + +type deltaObject struct { + plumbing.EncodedObject + base plumbing.Hash + hash plumbing.Hash + size int64 +} + +func newDeltaObject( + obj plumbing.EncodedObject, + hash plumbing.Hash, + base plumbing.Hash, + size int64) plumbing.DeltaObject { + return &deltaObject{ + EncodedObject: obj, + hash: hash, + base: base, + size: size, + } +} + +func (o *deltaObject) BaseHash() plumbing.Hash { + return o.base +} + +func (o *deltaObject) ActualSize() int64 { + return o.size +} + +func (o *deltaObject) ActualHash() plumbing.Hash { + return o.hash +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go new file mode 100644 index 0000000000..6c386f7992 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go @@ -0,0 +1,1188 @@ +// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt +package dotgit + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + stdioutil "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/go-git/go-billy/v5/osfs" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/storage" + "github.com/go-git/go-git/v5/utils/ioutil" + + "github.com/go-git/go-billy/v5" +) + +const ( + suffix = ".git" + packedRefsPath = "packed-refs" + configPath = "config" + indexPath = "index" + shallowPath = "shallow" + modulePath = "modules" + objectsPath = "objects" + packPath = "pack" + refsPath = "refs" + branchesPath = "branches" + hooksPath = "hooks" + infoPath = "info" + remotesPath = "remotes" + logsPath = "logs" + worktreesPath = "worktrees" + + tmpPackedRefsPrefix = "._packed-refs" + + packPrefix = "pack-" + packExt = ".pack" + idxExt = ".idx" +) + +var ( + // ErrNotFound is returned by New when the path is 
not found. + ErrNotFound = errors.New("path not found") + // ErrIdxNotFound is returned by Idxfile when the idx file is not found + ErrIdxNotFound = errors.New("idx file not found") + // ErrPackfileNotFound is returned by Packfile when the packfile is not found + ErrPackfileNotFound = errors.New("packfile not found") + // ErrConfigNotFound is returned by Config when the config is not found + ErrConfigNotFound = errors.New("config file not found") + // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is + // found in the packed-ref file. This is usually the case for corrupted git + // repositories. + ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") + // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. + ErrPackedRefsBadFormat = errors.New("malformed packed-ref") + // ErrSymRefTargetNotFound is returned when a symbolic reference is + // targeting a non-existing object. This usually means the repository + // is corrupt. + ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") + // ErrIsDir is returned when a reference file is attempting to be read, + // but the path specified is a directory. + ErrIsDir = errors.New("reference path is a directory") +) + +// Options holds configuration for the storage. +type Options struct { + // ExclusiveAccess means that the filesystem is not modified externally + // while the repo is open. + ExclusiveAccess bool + // KeepDescriptors makes the file descriptors to be reused but they will + // need to be manually closed calling Close(). + KeepDescriptors bool +} + +// The DotGit type represents a local git repository on disk. This +// type is not zero-value-safe, use the New function to initialize it. +type DotGit struct { + options Options + fs billy.Filesystem + + // incoming object directory information + incomingChecked bool + incomingDirName string + + objectList []plumbing.Hash // sorted + objectMap map[plumbing.Hash]struct{} + packList []plumbing.Hash + packMap map[plumbing.Hash]struct{} + + files map[plumbing.Hash]billy.File +} + +// New returns a DotGit value ready to be used. The path argument must +// be the absolute path of a git repository directory (e.g. +// "/foo/bar/.git"). +func New(fs billy.Filesystem) *DotGit { + return NewWithOptions(fs, Options{}) +} + +// NewWithOptions sets non default configuration options. +// See New for complete help. +func NewWithOptions(fs billy.Filesystem, o Options) *DotGit { + return &DotGit{ + options: o, + fs: fs, + } +} + +// Initialize creates all the folder scaffolding. +func (d *DotGit) Initialize() error { + mustExists := []string{ + d.fs.Join("objects", "info"), + d.fs.Join("objects", "pack"), + d.fs.Join("refs", "heads"), + d.fs.Join("refs", "tags"), + } + + for _, path := range mustExists { + _, err := d.fs.Stat(path) + if err == nil { + continue + } + + if !os.IsNotExist(err) { + return err + } + + if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { + return err + } + } + + return nil +} + +// Close closes all opened files. 
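+// In practice it is only relevant when Options.KeepDescriptors is set, as
+// that is the only mode in which DotGit keeps descriptors open between calls.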
+func (d *DotGit) Close() error { + var firstError error + if d.files != nil { + for _, f := range d.files { + err := f.Close() + if err != nil && firstError == nil { + firstError = err + continue + } + } + + d.files = nil + } + + if firstError != nil { + return firstError + } + + return nil +} + +// ConfigWriter returns a file pointer for write to the config file +func (d *DotGit) ConfigWriter() (billy.File, error) { + return d.fs.Create(configPath) +} + +// Config returns a file pointer for read to the config file +func (d *DotGit) Config() (billy.File, error) { + return d.fs.Open(configPath) +} + +// IndexWriter returns a file pointer for write to the index file +func (d *DotGit) IndexWriter() (billy.File, error) { + return d.fs.Create(indexPath) +} + +// Index returns a file pointer for read to the index file +func (d *DotGit) Index() (billy.File, error) { + return d.fs.Open(indexPath) +} + +// ShallowWriter returns a file pointer for write to the shallow file +func (d *DotGit) ShallowWriter() (billy.File, error) { + return d.fs.Create(shallowPath) +} + +// Shallow returns a file pointer for read to the shallow file +func (d *DotGit) Shallow() (billy.File, error) { + f, err := d.fs.Open(shallowPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + return f, nil +} + +// NewObjectPack return a writer for a new packfile, it saves the packfile to +// disk and also generates and save the index for the given packfile. +func (d *DotGit) NewObjectPack() (*PackWriter, error) { + d.cleanPackList() + return newPackWrite(d.fs) +} + +// ObjectPacks returns the list of availables packfiles +func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { + if !d.options.ExclusiveAccess { + return d.objectPacks() + } + + err := d.genPackList() + if err != nil { + return nil, err + } + + return d.packList, nil +} + +func (d *DotGit) objectPacks() ([]plumbing.Hash, error) { + packDir := d.fs.Join(objectsPath, packPath) + files, err := d.fs.ReadDir(packDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + var packs []plumbing.Hash + for _, f := range files { + n := f.Name() + if !strings.HasSuffix(n, packExt) || !strings.HasPrefix(n, packPrefix) { + continue + } + + h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack + if h.IsZero() { + // Ignore files with badly-formatted names. 
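+			// (plumbing.NewHash yields the zero hash for non-hex input, so a
+			// stray file such as "pack-tmp.pack", a hypothetical name, is
+			// skipped here.)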
+ continue + } + packs = append(packs, h) + } + + return packs, nil +} + +func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { + return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) +} + +func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { + if d.options.KeepDescriptors && extension == "pack" { + if d.files == nil { + d.files = make(map[plumbing.Hash]billy.File) + } + + f, ok := d.files[hash] + if ok { + return f, nil + } + } + + err := d.hasPack(hash) + if err != nil { + return nil, err + } + + path := d.objectPackPath(hash, extension) + pack, err := d.fs.Open(path) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrPackfileNotFound + } + + return nil, err + } + + if d.options.KeepDescriptors && extension == "pack" { + d.files[hash] = pack + } + + return pack, nil +} + +// ObjectPack returns a fs.File of the given packfile +func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { + err := d.hasPack(hash) + if err != nil { + return nil, err + } + + return d.objectPackOpen(hash, `pack`) +} + +// ObjectPackIdx returns a fs.File of the index file for a given packfile +func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { + err := d.hasPack(hash) + if err != nil { + return nil, err + } + + return d.objectPackOpen(hash, `idx`) +} + +func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { + d.cleanPackList() + + path := d.objectPackPath(hash, `pack`) + if !t.IsZero() { + fi, err := d.fs.Stat(path) + if err != nil { + return err + } + // too new, skip deletion. + if !fi.ModTime().Before(t) { + return nil + } + } + err := d.fs.Remove(path) + if err != nil { + return err + } + return d.fs.Remove(d.objectPackPath(hash, `idx`)) +} + +// NewObject return a writer for a new object file. +func (d *DotGit) NewObject() (*ObjectWriter, error) { + d.cleanObjectList() + + return newObjectWriter(d.fs) +} + +// ObjectsWithPrefix returns the hashes of objects that have the given prefix. +func (d *DotGit) ObjectsWithPrefix(prefix []byte) ([]plumbing.Hash, error) { + // Handle edge cases. + if len(prefix) < 1 { + return d.Objects() + } else if len(prefix) > len(plumbing.ZeroHash) { + return nil, nil + } + + if d.options.ExclusiveAccess { + err := d.genObjectList() + if err != nil { + return nil, err + } + + // Rely on d.objectList being sorted. + // Figure out the half-open interval defined by the prefix. + first := sort.Search(len(d.objectList), func(i int) bool { + // Same as plumbing.HashSlice.Less. + return bytes.Compare(d.objectList[i][:], prefix) >= 0 + }) + lim := len(d.objectList) + if limPrefix, overflow := incBytes(prefix); !overflow { + lim = sort.Search(len(d.objectList), func(i int) bool { + // Same as plumbing.HashSlice.Less. + return bytes.Compare(d.objectList[i][:], limPrefix) >= 0 + }) + } + return d.objectList[first:lim], nil + } + + // This is the slow path. + var objects []plumbing.Hash + var n int + err := d.ForEachObjectHash(func(hash plumbing.Hash) error { + n++ + if bytes.HasPrefix(hash[:], prefix) { + objects = append(objects, hash) + } + return nil + }) + if err != nil { + return nil, err + } + return objects, nil +} + +// Objects returns a slice with the hashes of objects found under the +// .git/objects/ directory. 
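+//
+// A usage sketch (error handling elided):
+//
+//	hashes, _ := d.Objects()
+//	for _, h := range hashes {
+//		fmt.Println(h.String())
+//	}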
+func (d *DotGit) Objects() ([]plumbing.Hash, error) { + if d.options.ExclusiveAccess { + err := d.genObjectList() + if err != nil { + return nil, err + } + + return d.objectList, nil + } + + var objects []plumbing.Hash + err := d.ForEachObjectHash(func(hash plumbing.Hash) error { + objects = append(objects, hash) + return nil + }) + if err != nil { + return nil, err + } + return objects, nil +} + +// ForEachObjectHash iterates over the hashes of objects found under the +// .git/objects/ directory and executes the provided function. +func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { + if !d.options.ExclusiveAccess { + return d.forEachObjectHash(fun) + } + + err := d.genObjectList() + if err != nil { + return err + } + + for _, h := range d.objectList { + err := fun(h) + if err != nil { + return err + } + } + + return nil +} + +func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error { + files, err := d.fs.ReadDir(objectsPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { + base := f.Name() + d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) + if err != nil { + return err + } + + for _, o := range d { + h := plumbing.NewHash(base + o.Name()) + if h.IsZero() { + // Ignore files with badly-formatted names. + continue + } + err = fun(h) + if err != nil { + return err + } + } + } + } + + return nil +} + +func (d *DotGit) cleanObjectList() { + d.objectMap = nil + d.objectList = nil +} + +func (d *DotGit) genObjectList() error { + if d.objectMap != nil { + return nil + } + + d.objectMap = make(map[plumbing.Hash]struct{}) + populate := func(h plumbing.Hash) error { + d.objectList = append(d.objectList, h) + d.objectMap[h] = struct{}{} + + return nil + } + if err := d.forEachObjectHash(populate); err != nil { + return err + } + plumbing.HashesSort(d.objectList) + return nil +} + +func (d *DotGit) hasObject(h plumbing.Hash) error { + if !d.options.ExclusiveAccess { + return nil + } + + err := d.genObjectList() + if err != nil { + return err + } + + _, ok := d.objectMap[h] + if !ok { + return plumbing.ErrObjectNotFound + } + + return nil +} + +func (d *DotGit) cleanPackList() { + d.packMap = nil + d.packList = nil +} + +func (d *DotGit) genPackList() error { + if d.packMap != nil { + return nil + } + + op, err := d.objectPacks() + if err != nil { + return err + } + + d.packMap = make(map[plumbing.Hash]struct{}) + d.packList = nil + + for _, h := range op { + d.packList = append(d.packList, h) + d.packMap[h] = struct{}{} + } + + return nil +} + +func (d *DotGit) hasPack(h plumbing.Hash) error { + if !d.options.ExclusiveAccess { + return nil + } + + err := d.genPackList() + if err != nil { + return err + } + + _, ok := d.packMap[h] + if !ok { + return ErrPackfileNotFound + } + + return nil +} + +func (d *DotGit) objectPath(h plumbing.Hash) string { + hash := h.String() + return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) +} + +// incomingObjectPath is intended to add support for a git pre-receive hook +// to be written it adds support for go-git to find objects in an "incoming" +// directory, so that the library can be used to write a pre-receive hook +// that deals with the incoming objects. 
+// +// More on git hooks found here : https://git-scm.com/docs/githooks +// More on 'quarantine'/incoming directory here: +// https://git-scm.com/docs/git-receive-pack +func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { + hString := h.String() + + if d.incomingDirName == "" { + return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) + } + + return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40]) +} + +// hasIncomingObjects searches for an incoming directory and keeps its name +// so it doesn't have to be found each time an object is accessed. +func (d *DotGit) hasIncomingObjects() bool { + if !d.incomingChecked { + directoryContents, err := d.fs.ReadDir(objectsPath) + if err == nil { + for _, file := range directoryContents { + if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { + d.incomingDirName = file.Name() + } + } + } + + d.incomingChecked = true + } + + return d.incomingDirName != "" +} + +// Object returns a fs.File pointing the object file, if exists +func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { + err := d.hasObject(h) + if err != nil { + return nil, err + } + + obj1, err1 := d.fs.Open(d.objectPath(h)) + if os.IsNotExist(err1) && d.hasIncomingObjects() { + obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) + if err2 != nil { + return obj1, err1 + } + return obj2, err2 + } + return obj1, err1 +} + +// ObjectStat returns a os.FileInfo pointing the object file, if exists +func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { + err := d.hasObject(h) + if err != nil { + return nil, err + } + + obj1, err1 := d.fs.Stat(d.objectPath(h)) + if os.IsNotExist(err1) && d.hasIncomingObjects() { + obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) + if err2 != nil { + return obj1, err1 + } + return obj2, err2 + } + return obj1, err1 +} + +// ObjectDelete removes the object file, if exists +func (d *DotGit) ObjectDelete(h plumbing.Hash) error { + d.cleanObjectList() + + err1 := d.fs.Remove(d.objectPath(h)) + if os.IsNotExist(err1) && d.hasIncomingObjects() { + err2 := d.fs.Remove(d.incomingObjectPath(h)) + if err2 != nil { + return err1 + } + return err2 + } + return err1 +} + +func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { + b, err := stdioutil.ReadAll(rd) + if err != nil { + return nil, err + } + + line := strings.TrimSpace(string(b)) + return plumbing.NewReferenceFromStrings(name, line), nil +} + +func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { + if old == nil { + return nil + } + ref, err := d.readReferenceFrom(f, old.Name().String()) + if err != nil { + return err + } + if ref.Hash() != old.Hash() { + return storage.ErrReferenceHasChanged + } + _, err = f.Seek(0, io.SeekStart) + if err != nil { + return err + } + return f.Truncate(0) +} + +func (d *DotGit) SetRef(r, old *plumbing.Reference) error { + var content string + switch r.Type() { + case plumbing.SymbolicReference: + content = fmt.Sprintf("ref: %s\n", r.Target()) + case plumbing.HashReference: + content = fmt.Sprintln(r.Hash().String()) + } + + fileName := r.Name().String() + + return d.setRef(fileName, content, old) +} + +// Refs scans the git directory collecting references, which it returns. +// Symbolic references are resolved and included in the output. 
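+//
+// A usage sketch (error handling elided):
+//
+//	refs, _ := d.Refs()
+//	for _, ref := range refs {
+//		fmt.Println(ref.Name(), ref.Hash())
+//	}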
+func (d *DotGit) Refs() ([]*plumbing.Reference, error) { + var refs []*plumbing.Reference + var seen = make(map[plumbing.ReferenceName]bool) + if err := d.addRefsFromRefDir(&refs, seen); err != nil { + return nil, err + } + + if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { + return nil, err + } + + if err := d.addRefFromHEAD(&refs); err != nil { + return nil, err + } + + return refs, nil +} + +// Ref returns the reference for a given reference name. +func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { + ref, err := d.readReferenceFile(".", name.String()) + if err == nil { + return ref, nil + } + + return d.packedRef(name) +} + +func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { + s := bufio.NewScanner(f) + var refs []*plumbing.Reference + for s.Scan() { + ref, err := d.processLine(s.Text()) + if err != nil { + return nil, err + } + + if ref != nil { + refs = append(refs, ref) + } + } + + return refs, s.Err() +} + +func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { + f, err := d.fs.Open(packedRefsPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + defer ioutil.CheckClose(f, &err) + return d.findPackedRefsInFile(f) +} + +func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { + refs, err := d.findPackedRefs() + if err != nil { + return nil, err + } + + for _, ref := range refs { + if ref.Name() == name { + return ref, nil + } + } + + return nil, plumbing.ErrReferenceNotFound +} + +// RemoveRef removes a reference by name. +func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { + path := d.fs.Join(".", name.String()) + _, err := d.fs.Stat(path) + if err == nil { + err = d.fs.Remove(path) + // Drop down to remove it from the packed refs file, too. + } + + if err != nil && !os.IsNotExist(err) { + return err + } + + return d.rewritePackedRefsWithoutRef(name) +} + +func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { + packedRefs, err := d.findPackedRefs() + if err != nil { + return err + } + + for _, ref := range packedRefs { + if !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + return nil +} + +func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { + packedRefs, err := d.findPackedRefsInFile(f) + if err != nil { + return err + } + + for _, ref := range packedRefs { + if !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + return nil +} + +func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( + pr billy.File, err error) { + var f billy.File + defer func() { + if err != nil && f != nil { + ioutil.CheckClose(f, &err) + } + }() + + // File mode is retrieved from a constant defined in the target specific + // files (dotgit_rewrite_packed_refs_*). Some modes are not available + // in all filesystems. + openFlags := d.openAndLockPackedRefsMode() + if doCreate { + openFlags |= os.O_CREATE + } + + // Keep trying to open and lock the file until we're sure the file + // didn't change between the open and the lock. 
+ for { + f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) + if err != nil { + if os.IsNotExist(err) && !doCreate { + return nil, nil + } + + return nil, err + } + fi, err := d.fs.Stat(packedRefsPath) + if err != nil { + return nil, err + } + mtime := fi.ModTime() + + err = f.Lock() + if err != nil { + return nil, err + } + + fi, err = d.fs.Stat(packedRefsPath) + if err != nil { + return nil, err + } + if mtime.Equal(fi.ModTime()) { + break + } + // The file has changed since we opened it. Close and retry. + err = f.Close() + if err != nil { + return nil, err + } + } + return f, nil +} + +func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { + pr, err := d.openAndLockPackedRefs(false) + if err != nil { + return err + } + if pr == nil { + return nil + } + defer ioutil.CheckClose(pr, &err) + + // Creating the temp file in the same directory as the target file + // improves our chances for rename operation to be atomic. + tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + tmpName := tmp.Name() + defer func() { + ioutil.CheckClose(tmp, &err) + _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it + }() + + s := bufio.NewScanner(pr) + found := false + for s.Scan() { + line := s.Text() + ref, err := d.processLine(line) + if err != nil { + return err + } + + if ref != nil && ref.Name() == name { + found = true + continue + } + + if _, err := fmt.Fprintln(tmp, line); err != nil { + return err + } + } + + if err := s.Err(); err != nil { + return err + } + + if !found { + return nil + } + + return d.rewritePackedRefsWhileLocked(tmp, pr) +} + +// process lines from a packed-refs file +func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { + if len(line) == 0 { + return nil, nil + } + + switch line[0] { + case '#': // comment - ignore + return nil, nil + case '^': // annotated tag commit of the previous line - ignore + return nil, nil + default: + ws := strings.Split(line, " ") // hash then ref + if len(ws) != 2 { + return nil, ErrPackedRefsBadFormat + } + + return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil + } +} + +func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { + return d.walkReferencesTree(refs, []string{refsPath}, seen) +} + +func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { + files, err := d.fs.ReadDir(d.fs.Join(relPath...)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + newRelPath := append(append([]string(nil), relPath...), f.Name()) + if f.IsDir() { + if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { + return err + } + + continue + } + + ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) + if err != nil { + return err + } + + if ref != nil && !seen[ref.Name()] { + *refs = append(*refs, ref) + seen[ref.Name()] = true + } + } + + return nil +} + +func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { + ref, err := d.readReferenceFile(".", "HEAD") + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + *refs = append(*refs, ref) + return nil +} + +func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { + path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) + st, err := d.fs.Stat(path) + if err != nil { + return nil, err + } + if st.IsDir() { + return 
nil, ErrIsDir + } + + f, err := d.fs.Open(path) + if err != nil { + return nil, err + } + defer ioutil.CheckClose(f, &err) + + return d.readReferenceFrom(f, name) +} + +func (d *DotGit) CountLooseRefs() (int, error) { + var refs []*plumbing.Reference + var seen = make(map[plumbing.ReferenceName]bool) + if err := d.addRefsFromRefDir(&refs, seen); err != nil { + return 0, err + } + + return len(refs), nil +} + +// PackRefs packs all loose refs into the packed-refs file. +// +// This implementation only works under the assumption that the view +// of the file system won't be updated during this operation. This +// strategy would not work on a general file system though, without +// locking each loose reference and checking it again before deleting +// the file, because otherwise an updated reference could sneak in and +// then be deleted by the packed-refs process. Alternatively, every +// ref update could also lock packed-refs, so only one lock is +// required during ref-packing. But that would worsen performance in +// the common case. +// +// TODO: add an "all" boolean like the `git pack-refs --all` flag. +// When `all` is false, it would only pack refs that have already been +// packed, plus all tags. +func (d *DotGit) PackRefs() (err error) { + // Lock packed-refs, and create it if it doesn't exist yet. + f, err := d.openAndLockPackedRefs(true) + if err != nil { + return err + } + defer ioutil.CheckClose(f, &err) + + // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. + var refs []*plumbing.Reference + seen := make(map[plumbing.ReferenceName]bool) + if err = d.addRefsFromRefDir(&refs, seen); err != nil { + return err + } + if len(refs) == 0 { + // Nothing to do! + return nil + } + numLooseRefs := len(refs) + if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { + return err + } + + // Write them all to a new temp packed-refs file. + tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + tmpName := tmp.Name() + defer func() { + ioutil.CheckClose(tmp, &err) + _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it + }() + + w := bufio.NewWriter(tmp) + for _, ref := range refs { + _, err = w.WriteString(ref.String() + "\n") + if err != nil { + return err + } + } + err = w.Flush() + if err != nil { + return err + } + + // Rename the temp packed-refs file. + err = d.rewritePackedRefsWhileLocked(tmp, f) + if err != nil { + return err + } + + // Delete all the loose refs, while still holding the packed-refs + // lock. + for _, ref := range refs[:numLooseRefs] { + path := d.fs.Join(".", ref.Name().String()) + err = d.fs.Remove(path) + if err != nil && !os.IsNotExist(err) { + return err + } + } + + return nil +} + +// Module return a billy.Filesystem pointing to the module folder +func (d *DotGit) Module(name string) (billy.Filesystem, error) { + return d.fs.Chroot(d.fs.Join(modulePath, name)) +} + +// Alternates returns DotGit(s) based off paths in objects/info/alternates if +// available. This can be used to checks if it's a shared repository. +func (d *DotGit) Alternates() ([]*DotGit, error) { + altpath := d.fs.Join("objects", "info", "alternates") + f, err := d.fs.Open(altpath) + if err != nil { + return nil, err + } + defer f.Close() + + var alternates []*DotGit + + // Read alternate paths line-by-line and create DotGit objects. 
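+	// Each line of the alternates file holds one path, e.g. a hypothetical
+	// entry "../../shared-repo/.git/objects"; relative entries are resolved
+	// against .git/objects/info below.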
+ scanner := bufio.NewScanner(f) + for scanner.Scan() { + path := scanner.Text() + if !filepath.IsAbs(path) { + // For relative paths, we can perform an internal conversion to + // slash so that they work cross-platform. + slashPath := filepath.ToSlash(path) + // If the path is not absolute, it must be relative to object + // database (.git/objects/info). + // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html + // Hence, derive a path relative to DotGit's root. + // "../../../reponame/.git/" -> "../../reponame/.git" + // Remove the first ../ + relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) + normalPath := filepath.FromSlash(relpath) + path = filepath.Join(d.fs.Root(), normalPath) + } + fs := osfs.New(filepath.Dir(path)) + alternates = append(alternates, New(fs)) + } + + if err = scanner.Err(); err != nil { + return nil, err + } + + return alternates, nil +} + +// Fs returns the underlying filesystem of the DotGit folder. +func (d *DotGit) Fs() billy.Filesystem { + return d.fs +} + +func isHex(s string) bool { + for _, b := range []byte(s) { + if isNum(b) { + continue + } + if isHexAlpha(b) { + continue + } + + return false + } + + return true +} + +func isNum(b byte) bool { + return b >= '0' && b <= '9' +} + +func isHexAlpha(b byte) bool { + return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' +} + +// incBytes increments a byte slice, which involves incrementing the +// right-most byte, and following carry leftward. +// It makes a copy so that the provided slice's underlying array is not modified. +// If the overall operation overflows (e.g. incBytes(0xff, 0xff)), the second return parameter indicates that. +func incBytes(in []byte) (out []byte, overflow bool) { + out = make([]byte, len(in)) + copy(out, in) + for i := len(out) - 1; i >= 0; i-- { + out[i]++ + if out[i] != 0 { + return // Didn't overflow. + } + } + overflow = true + return +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go new file mode 100644 index 0000000000..43263eadfa --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go @@ -0,0 +1,81 @@ +package dotgit + +import ( + "io" + "os" + "runtime" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +func (d *DotGit) openAndLockPackedRefsMode() int { + if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { + return os.O_RDWR + } + + return os.O_RDONLY +} + +func (d *DotGit) rewritePackedRefsWhileLocked( + tmp billy.File, pr billy.File) error { + // Try plain rename. If we aren't using the bare Windows filesystem as the + // storage layer, we might be able to get away with a rename over a locked + // file. + err := d.fs.Rename(tmp.Name(), pr.Name()) + if err == nil { + return nil + } + + // If we are in a filesystem that does not support rename (e.g. sivafs) + // a full copy is done. + if err == billy.ErrNotSupported { + return d.copyNewFile(tmp, pr) + } + + if runtime.GOOS != "windows" { + return err + } + + // Otherwise, Windows doesn't let us rename over a locked file, so + // we have to do a straight copy. Unfortunately this could result + // in a partially-written file if the process fails before the + // copy completes. 
+	return d.copyToExistingFile(tmp, pr)
+}
+
+func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
+	_, err := pr.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+	err = pr.Truncate(0)
+	if err != nil {
+		return err
+	}
+	_, err = tmp.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(pr, tmp)
+
+	return err
+}
+
+func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
+	prWrite, err := d.fs.Create(pr.Name())
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(prWrite, &err)
+
+	_, err = tmp.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(prWrite, tmp)
+
+	return err
+}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
new file mode 100644
index 0000000000..c057f5c486
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
@@ -0,0 +1,90 @@
+package dotgit
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+
+	"github.com/go-git/go-billy/v5"
+)
+
+func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
+	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
+		return d.setRefRwfs(fileName, content, old)
+	}
+
+	return d.setRefNorwfs(fileName, content, old)
+}
+
+func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
+	// If we are not checking an old ref, just truncate the file.
+	mode := os.O_RDWR | os.O_CREATE
+	if old == nil {
+		mode |= os.O_TRUNC
+	}
+
+	f, err := d.fs.OpenFile(fileName, mode, 0666)
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	// Lock is unlocked by the deferred Close above. This is because Unlock
+	// does not imply a fsync and thus there would be a race between
+	// Unlock+Close and other concurrent writers. Adding Sync to go-billy
+	// could work, but this is better (and avoids superfluous syncs).
+	err = f.Lock()
+	if err != nil {
+		return err
+	}
+
+	// This is a no-op to call even when old is nil.
+	err = d.checkReferenceAndTruncate(f, old)
+	if err != nil {
+		return err
+	}
+
+	_, err = f.Write([]byte(content))
+	return err
+}
+
+// There are some filesystems that don't support opening files in RDWR mode.
+// On these filesystems the standard SetRef function cannot be used, as it
+// reads the reference file to check that it's not modified before updating it.
+//
+// This version of the function writes the reference without extra checks,
+// making it compatible with these simple filesystems. This is usually not
+// a problem, as they should be accessed by only one process at a time.
+func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error { + _, err := d.fs.Stat(fileName) + if err == nil && old != nil { + fRead, err := d.fs.Open(fileName) + if err != nil { + return err + } + + ref, err := d.readReferenceFrom(fRead, old.Name().String()) + fRead.Close() + + if err != nil { + return err + } + + if ref.Hash() != old.Hash() { + return fmt.Errorf("reference has changed concurrently") + } + } + + f, err := d.fs.Create(fileName) + if err != nil { + return err + } + + defer f.Close() + + _, err = f.Write([]byte(content)) + return err +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go new file mode 100644 index 0000000000..8d243efea1 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go @@ -0,0 +1,111 @@ +package dotgit + +import ( + "os" + "path/filepath" + "strings" + + "github.com/go-git/go-billy/v5" +) + +// RepositoryFilesystem is a billy.Filesystem compatible object wrapper +// which handles dot-git filesystem operations and supports commondir according to git scm layout: +// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt +type RepositoryFilesystem struct { + dotGitFs billy.Filesystem + commonDotGitFs billy.Filesystem +} + +func NewRepositoryFilesystem(dotGitFs, commonDotGitFs billy.Filesystem) *RepositoryFilesystem { + return &RepositoryFilesystem{ + dotGitFs: dotGitFs, + commonDotGitFs: commonDotGitFs, + } +} + +func (fs *RepositoryFilesystem) mapToRepositoryFsByPath(path string) billy.Filesystem { + // Nothing to decide if commondir not defined + if fs.commonDotGitFs == nil { + return fs.dotGitFs + } + + cleanPath := filepath.Clean(path) + + // Check exceptions for commondir (https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt) + switch cleanPath { + case fs.dotGitFs.Join(logsPath, "HEAD"): + return fs.dotGitFs + case fs.dotGitFs.Join(refsPath, "bisect"), fs.dotGitFs.Join(refsPath, "rewritten"), fs.dotGitFs.Join(refsPath, "worktree"): + return fs.dotGitFs + } + + // Determine dot-git root by first path element. + // There are some elements which should always use commondir when commondir defined. + // Usual dot-git root will be used for the rest of files. 
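+	// For example, a hypothetical path "objects/pack/pack-x.idx" starts with
+	// "objects" and is routed to the common dot-git, while "index" falls
+	// through to the per-worktree dot-git.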
+ switch strings.Split(cleanPath, string(filepath.Separator))[0] { + case objectsPath, refsPath, packedRefsPath, configPath, branchesPath, hooksPath, infoPath, remotesPath, logsPath, shallowPath, worktreesPath: + return fs.commonDotGitFs + default: + return fs.dotGitFs + } +} + +func (fs *RepositoryFilesystem) Create(filename string) (billy.File, error) { + return fs.mapToRepositoryFsByPath(filename).Create(filename) +} + +func (fs *RepositoryFilesystem) Open(filename string) (billy.File, error) { + return fs.mapToRepositoryFsByPath(filename).Open(filename) +} + +func (fs *RepositoryFilesystem) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + return fs.mapToRepositoryFsByPath(filename).OpenFile(filename, flag, perm) +} + +func (fs *RepositoryFilesystem) Stat(filename string) (os.FileInfo, error) { + return fs.mapToRepositoryFsByPath(filename).Stat(filename) +} + +func (fs *RepositoryFilesystem) Rename(oldpath, newpath string) error { + return fs.mapToRepositoryFsByPath(oldpath).Rename(oldpath, newpath) +} + +func (fs *RepositoryFilesystem) Remove(filename string) error { + return fs.mapToRepositoryFsByPath(filename).Remove(filename) +} + +func (fs *RepositoryFilesystem) Join(elem ...string) string { + return fs.dotGitFs.Join(elem...) +} + +func (fs *RepositoryFilesystem) TempFile(dir, prefix string) (billy.File, error) { + return fs.mapToRepositoryFsByPath(dir).TempFile(dir, prefix) +} + +func (fs *RepositoryFilesystem) ReadDir(path string) ([]os.FileInfo, error) { + return fs.mapToRepositoryFsByPath(path).ReadDir(path) +} + +func (fs *RepositoryFilesystem) MkdirAll(filename string, perm os.FileMode) error { + return fs.mapToRepositoryFsByPath(filename).MkdirAll(filename, perm) +} + +func (fs *RepositoryFilesystem) Lstat(filename string) (os.FileInfo, error) { + return fs.mapToRepositoryFsByPath(filename).Lstat(filename) +} + +func (fs *RepositoryFilesystem) Symlink(target, link string) error { + return fs.mapToRepositoryFsByPath(target).Symlink(target, link) +} + +func (fs *RepositoryFilesystem) Readlink(link string) (string, error) { + return fs.mapToRepositoryFsByPath(link).Readlink(link) +} + +func (fs *RepositoryFilesystem) Chroot(path string) (billy.Filesystem, error) { + return fs.mapToRepositoryFsByPath(path).Chroot(path) +} + +func (fs *RepositoryFilesystem) Root() string { + return fs.dotGitFs.Root() +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go new file mode 100644 index 0000000000..e2ede938cc --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go @@ -0,0 +1,284 @@ +package dotgit + +import ( + "fmt" + "io" + "sync/atomic" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/idxfile" + "github.com/go-git/go-git/v5/plumbing/format/objfile" + "github.com/go-git/go-git/v5/plumbing/format/packfile" + + "github.com/go-git/go-billy/v5" +) + +// PackWriter is a io.Writer that generates the packfile index simultaneously, +// a packfile.Decoder is used with a file reader to read the file being written +// this operation is synchronized with the write operations. 
+// The packfile is written in a temp file, when Close is called this file +// is renamed/moved (depends on the Filesystem implementation) to the final +// location, if the PackWriter is not used, nothing is written +type PackWriter struct { + Notify func(plumbing.Hash, *idxfile.Writer) + + fs billy.Filesystem + fr, fw billy.File + synced *syncedReader + checksum plumbing.Hash + parser *packfile.Parser + writer *idxfile.Writer + result chan error +} + +func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { + fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") + if err != nil { + return nil, err + } + + fr, err := fs.Open(fw.Name()) + if err != nil { + return nil, err + } + + writer := &PackWriter{ + fs: fs, + fw: fw, + fr: fr, + synced: newSyncedReader(fw, fr), + result: make(chan error), + } + + go writer.buildIndex() + return writer, nil +} + +func (w *PackWriter) buildIndex() { + s := packfile.NewScanner(w.synced) + w.writer = new(idxfile.Writer) + var err error + w.parser, err = packfile.NewParser(s, w.writer) + if err != nil { + w.result <- err + return + } + + checksum, err := w.parser.Parse() + if err != nil { + w.result <- err + return + } + + w.checksum = checksum + w.result <- err +} + +// waitBuildIndex waits until buildIndex function finishes, this can terminate +// with a packfile.ErrEmptyPackfile, this means that nothing was written so we +// ignore the error +func (w *PackWriter) waitBuildIndex() error { + err := <-w.result + if err == packfile.ErrEmptyPackfile { + return nil + } + + return err +} + +func (w *PackWriter) Write(p []byte) (int, error) { + return w.synced.Write(p) +} + +// Close closes all the file descriptors and save the final packfile, if nothing +// was written, the tempfiles are deleted without writing a packfile. 
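+//
+// A minimal usage sketch (assuming packData is an io.Reader holding a valid
+// packfile, with error handling elided):
+//
+//	w, _ := d.NewObjectPack()
+//	io.Copy(w, packData)
+//	w.Close()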
+func (w *PackWriter) Close() error { + defer func() { + if w.Notify != nil && w.writer != nil && w.writer.Finished() { + w.Notify(w.checksum, w.writer) + } + + close(w.result) + }() + + if err := w.synced.Close(); err != nil { + return err + } + + if err := w.waitBuildIndex(); err != nil { + return err + } + + if err := w.fr.Close(); err != nil { + return err + } + + if err := w.fw.Close(); err != nil { + return err + } + + if w.writer == nil || !w.writer.Finished() { + return w.clean() + } + + return w.save() +} + +func (w *PackWriter) clean() error { + return w.fs.Remove(w.fw.Name()) +} + +func (w *PackWriter) save() error { + base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) + idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) + if err != nil { + return err + } + + if err := w.encodeIdx(idx); err != nil { + return err + } + + if err := idx.Close(); err != nil { + return err + } + + return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) +} + +func (w *PackWriter) encodeIdx(writer io.Writer) error { + idx, err := w.writer.Index() + if err != nil { + return err + } + + e := idxfile.NewEncoder(writer) + _, err = e.Encode(idx) + return err +} + +type syncedReader struct { + w io.Writer + r io.ReadSeeker + + blocked, done uint32 + written, read uint64 + news chan bool +} + +func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { + return &syncedReader{ + w: w, + r: r, + news: make(chan bool), + } +} + +func (s *syncedReader) Write(p []byte) (n int, err error) { + defer func() { + written := atomic.AddUint64(&s.written, uint64(n)) + read := atomic.LoadUint64(&s.read) + if written > read { + s.wake() + } + }() + + n, err = s.w.Write(p) + return +} + +func (s *syncedReader) Read(p []byte) (n int, err error) { + defer func() { atomic.AddUint64(&s.read, uint64(n)) }() + + for { + s.sleep() + n, err = s.r.Read(p) + if err == io.EOF && !s.isDone() && n == 0 { + continue + } + + break + } + + return +} + +func (s *syncedReader) isDone() bool { + return atomic.LoadUint32(&s.done) == 1 +} + +func (s *syncedReader) isBlocked() bool { + return atomic.LoadUint32(&s.blocked) == 1 +} + +func (s *syncedReader) wake() { + if s.isBlocked() { + atomic.StoreUint32(&s.blocked, 0) + s.news <- true + } +} + +func (s *syncedReader) sleep() { + read := atomic.LoadUint64(&s.read) + written := atomic.LoadUint64(&s.written) + if read >= written { + atomic.StoreUint32(&s.blocked, 1) + <-s.news + } + +} + +func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + return s.r.Seek(offset, whence) + } + + p, err := s.r.Seek(offset, whence) + atomic.StoreUint64(&s.read, uint64(p)) + + return p, err +} + +func (s *syncedReader) Close() error { + atomic.StoreUint32(&s.done, 1) + close(s.news) + return nil +} + +type ObjectWriter struct { + objfile.Writer + fs billy.Filesystem + f billy.File +} + +func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { + f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") + if err != nil { + return nil, err + } + + return &ObjectWriter{ + Writer: (*objfile.NewWriter(f)), + fs: fs, + f: f, + }, nil +} + +func (w *ObjectWriter) Close() error { + if err := w.Writer.Close(); err != nil { + return err + } + + if err := w.f.Close(); err != nil { + return err + } + + return w.save() +} + +func (w *ObjectWriter) save() error { + hash := w.Hash().String() + file := w.fs.Join(objectsPath, hash[0:2], hash[2:40]) + + return w.fs.Rename(w.f.Name(), file) +} diff --git 
a/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go new file mode 100644 index 0000000000..a19176f83d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go @@ -0,0 +1,54 @@ +package filesystem + +import ( + "bufio" + "os" + + "github.com/go-git/go-git/v5/plumbing/format/index" + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +type IndexStorage struct { + dir *dotgit.DotGit +} + +func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { + f, err := s.dir.IndexWriter() + if err != nil { + return err + } + + defer ioutil.CheckClose(f, &err) + bw := bufio.NewWriter(f) + defer func() { + if e := bw.Flush(); err == nil && e != nil { + err = e + } + }() + + e := index.NewEncoder(bw) + err = e.Encode(idx) + return err +} + +func (s *IndexStorage) Index() (i *index.Index, err error) { + idx := &index.Index{ + Version: 2, + } + + f, err := s.dir.Index() + if err != nil { + if os.IsNotExist(err) { + return idx, nil + } + + return nil, err + } + + defer ioutil.CheckClose(f, &err) + + d := index.NewDecoder(bufio.NewReader(f)) + err = d.Decode(idx) + return idx, err +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go new file mode 100644 index 0000000000..20336c1184 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go @@ -0,0 +1,20 @@ +package filesystem + +import ( + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/storage" + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" +) + +type ModuleStorage struct { + dir *dotgit.DotGit +} + +func (s *ModuleStorage) Module(name string) (storage.Storer, error) { + fs, err := s.dir.Module(name) + if err != nil { + return nil, err + } + + return NewStorage(fs, cache.NewObjectLRUDefault()), nil +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go new file mode 100644 index 0000000000..0c25dad613 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go @@ -0,0 +1,848 @@ +package filesystem + +import ( + "bytes" + "io" + "os" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/idxfile" + "github.com/go-git/go-git/v5/plumbing/format/objfile" + "github.com/go-git/go-git/v5/plumbing/format/packfile" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/storage/filesystem/dotgit" + "github.com/go-git/go-git/v5/utils/ioutil" + + "github.com/go-git/go-billy/v5" +) + +type ObjectStorage struct { + options Options + + // objectCache is an object cache uses to cache delta's bases and also recently + // loaded loose objects + objectCache cache.Object + + dir *dotgit.DotGit + index map[plumbing.Hash]idxfile.Index + + packList []plumbing.Hash + packListIdx int + packfiles map[plumbing.Hash]*packfile.Packfile +} + +// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. 
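+//
+// A usage sketch (assuming fs is a billy.Filesystem rooted at a .git
+// directory):
+//
+//	dir := dotgit.New(fs)
+//	s := NewObjectStorage(dir, cache.NewObjectLRUDefault())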
+func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage { + return NewObjectStorageWithOptions(dir, objectCache, Options{}) +} + +// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options +func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage { + return &ObjectStorage{ + options: ops, + objectCache: objectCache, + dir: dir, + } +} + +func (s *ObjectStorage) requireIndex() error { + if s.index != nil { + return nil + } + + s.index = make(map[plumbing.Hash]idxfile.Index) + packs, err := s.dir.ObjectPacks() + if err != nil { + return err + } + + for _, h := range packs { + if err := s.loadIdxFile(h); err != nil { + return err + } + } + + return nil +} + +// Reindex indexes again all packfiles. Useful if git changed packfiles externally +func (s *ObjectStorage) Reindex() { + s.index = nil +} + +func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { + f, err := s.dir.ObjectPackIdx(h) + if err != nil { + return err + } + + defer ioutil.CheckClose(f, &err) + + idxf := idxfile.NewMemoryIndex() + d := idxfile.NewDecoder(f) + if err = d.Decode(idxf); err != nil { + return err + } + + s.index[h] = idxf + return err +} + +func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { + return &plumbing.MemoryObject{} +} + +func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { + if err := s.requireIndex(); err != nil { + return nil, err + } + + w, err := s.dir.NewObjectPack() + if err != nil { + return nil, err + } + + w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) { + index, err := writer.Index() + if err == nil { + s.index[h] = index + } + } + + return w, nil +} + +// SetEncodedObject adds a new object to the storage. +func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) { + if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject { + return plumbing.ZeroHash, plumbing.ErrInvalidType + } + + ow, err := s.dir.NewObject() + if err != nil { + return plumbing.ZeroHash, err + } + + defer ioutil.CheckClose(ow, &err) + + or, err := o.Reader() + if err != nil { + return plumbing.ZeroHash, err + } + + defer ioutil.CheckClose(or, &err) + + if err = ow.WriteHeader(o.Type(), o.Size()); err != nil { + return plumbing.ZeroHash, err + } + + if _, err = io.Copy(ow, or); err != nil { + return plumbing.ZeroHash, err + } + + return o.Hash(), err +} + +// HasEncodedObject returns nil if the object exists, without actually +// reading the object data from storage. +func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { + // Check unpacked objects + f, err := s.dir.Object(h) + if err != nil { + if !os.IsNotExist(err) { + return err + } + // Fall through to check packed objects. + } else { + defer ioutil.CheckClose(f, &err) + return nil + } + + // Check packed objects. 
+ if err := s.requireIndex(); err != nil { + return err + } + _, _, offset := s.findObjectInPackfile(h) + if offset == -1 { + return plumbing.ErrObjectNotFound + } + return nil +} + +func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) ( + size int64, err error) { + f, err := s.dir.Object(h) + if err != nil { + if os.IsNotExist(err) { + return 0, plumbing.ErrObjectNotFound + } + + return 0, err + } + + r, err := objfile.NewReader(f) + if err != nil { + return 0, err + } + defer ioutil.CheckClose(r, &err) + + _, size, err = r.Header() + return size, err +} + +func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) { + if p := s.packfileFromCache(pack); p != nil { + return p, nil + } + + f, err := s.dir.ObjectPack(pack) + if err != nil { + return nil, err + } + + var p *packfile.Packfile + if s.objectCache != nil { + p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) + } else { + p = packfile.NewPackfile(idx, s.dir.Fs(), f) + } + + return p, s.storePackfileInCache(pack, p) +} + +func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile { + if s.packfiles == nil { + if s.options.KeepDescriptors { + s.packfiles = make(map[plumbing.Hash]*packfile.Packfile) + } else if s.options.MaxOpenDescriptors > 0 { + s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors) + s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors) + } + } + + return s.packfiles[hash] +} + +func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error { + if s.options.KeepDescriptors { + s.packfiles[hash] = p + return nil + } + + if s.options.MaxOpenDescriptors <= 0 { + return nil + } + + // start over as the limit of packList is hit + if s.packListIdx >= len(s.packList) { + s.packListIdx = 0 + } + + // close the existing packfile if open + if next := s.packList[s.packListIdx]; !next.IsZero() { + open := s.packfiles[next] + delete(s.packfiles, next) + if open != nil { + if err := open.Close(); err != nil { + return err + } + } + } + + // cache newly open packfile + s.packList[s.packListIdx] = hash + s.packfiles[hash] = p + s.packListIdx++ + + return nil +} + +func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) ( + size int64, err error) { + if err := s.requireIndex(); err != nil { + return 0, err + } + + pack, _, offset := s.findObjectInPackfile(h) + if offset == -1 { + return 0, plumbing.ErrObjectNotFound + } + + idx := s.index[pack] + hash, err := idx.FindHash(offset) + if err == nil { + obj, ok := s.objectCache.Get(hash) + if ok { + return obj.Size(), nil + } + } else if err != nil && err != plumbing.ErrObjectNotFound { + return 0, err + } + + p, err := s.packfile(idx, pack) + if err != nil { + return 0, err + } + + if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { + defer ioutil.CheckClose(p, &err) + } + + return p.GetSizeByOffset(offset) +} + +// EncodedObjectSize returns the plaintext size of the given object, +// without actually reading the full object data from storage. +func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( + size int64, err error) { + size, err = s.encodedObjectSizeFromUnpacked(h) + if err != nil && err != plumbing.ErrObjectNotFound { + return 0, err + } else if err == nil { + return size, nil + } + + return s.encodedObjectSizeFromPackfile(h) +} + +// EncodedObject returns the object with the given hash, by searching for it in +// the packfile and the git object directories. 
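The packList/packfiles bookkeeping above is what the MaxOpenDescriptors option drives. A hedged sketch of enabling the cap through the public constructor that appears later in this diff; the path is a placeholder:

package sketch

import (
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// openCapped keeps at most 4 packfiles open at a time; older descriptors
// are evicted by the packList ring shown above.
func openCapped() *filesystem.Storage {
	return filesystem.NewStorageWithOptions(
		osfs.New("/path/to/repo/.git"),
		cache.NewObjectLRUDefault(),
		filesystem.Options{MaxOpenDescriptors: 4},
	)
}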
+func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { + var obj plumbing.EncodedObject + var err error + + if s.index != nil { + obj, err = s.getFromPackfile(h, false) + if err == plumbing.ErrObjectNotFound { + obj, err = s.getFromUnpacked(h) + } + } else { + obj, err = s.getFromUnpacked(h) + if err == plumbing.ErrObjectNotFound { + obj, err = s.getFromPackfile(h, false) + } + } + + // If the error is still object not found, check if it's a shared object + // repository. + if err == plumbing.ErrObjectNotFound { + dotgits, e := s.dir.Alternates() + if e == nil { + // Create a new object storage with the DotGit(s) and check for the + // required hash object. Skip when not found. + for _, dg := range dotgits { + o := NewObjectStorage(dg, s.objectCache) + enobj, enerr := o.EncodedObject(t, h) + if enerr != nil { + continue + } + return enobj, nil + } + } + } + + if err != nil { + return nil, err + } + + if plumbing.AnyObject != t && obj.Type() != t { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +// DeltaObject returns the object with the given hash, by searching for +// it in the packfile and the git object directories. +func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType, + h plumbing.Hash) (plumbing.EncodedObject, error) { + obj, err := s.getFromUnpacked(h) + if err == plumbing.ErrObjectNotFound { + obj, err = s.getFromPackfile(h, true) + } + + if err != nil { + return nil, err + } + + if plumbing.AnyObject != t && obj.Type() != t { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { + f, err := s.dir.Object(h) + if err != nil { + if os.IsNotExist(err) { + return nil, plumbing.ErrObjectNotFound + } + + return nil, err + } + defer ioutil.CheckClose(f, &err) + + if cacheObj, found := s.objectCache.Get(h); found { + return cacheObj, nil + } + + obj = s.NewEncodedObject() + r, err := objfile.NewReader(f) + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(r, &err) + + t, size, err := r.Header() + if err != nil { + return nil, err + } + + obj.SetType(t) + obj.SetSize(size) + w, err := obj.Writer() + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(w, &err) + + s.objectCache.Put(obj) + + _, err = io.Copy(w, r) + return obj, err +} + +// Get returns the object with the given hash, by searching for it in +// the packfile. 
+func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( + plumbing.EncodedObject, error) { + + if err := s.requireIndex(); err != nil { + return nil, err + } + + pack, hash, offset := s.findObjectInPackfile(h) + if offset == -1 { + return nil, plumbing.ErrObjectNotFound + } + + idx := s.index[pack] + p, err := s.packfile(idx, pack) + if err != nil { + return nil, err + } + + if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { + defer ioutil.CheckClose(p, &err) + } + + if canBeDelta { + return s.decodeDeltaObjectAt(p, offset, hash) + } + + return s.decodeObjectAt(p, offset) +} + +func (s *ObjectStorage) decodeObjectAt( + p *packfile.Packfile, + offset int64, +) (plumbing.EncodedObject, error) { + hash, err := p.FindHash(offset) + if err == nil { + obj, ok := s.objectCache.Get(hash) + if ok { + return obj, nil + } + } + + if err != nil && err != plumbing.ErrObjectNotFound { + return nil, err + } + + return p.GetByOffset(offset) +} + +func (s *ObjectStorage) decodeDeltaObjectAt( + p *packfile.Packfile, + offset int64, + hash plumbing.Hash, +) (plumbing.EncodedObject, error) { + scan := p.Scanner() + header, err := scan.SeekObjectHeader(offset) + if err != nil { + return nil, err + } + + var ( + base plumbing.Hash + ) + + switch header.Type { + case plumbing.REFDeltaObject: + base = header.Reference + case plumbing.OFSDeltaObject: + base, err = p.FindHash(header.OffsetReference) + if err != nil { + return nil, err + } + default: + return s.decodeObjectAt(p, offset) + } + + obj := &plumbing.MemoryObject{} + obj.SetType(header.Type) + w, err := obj.Writer() + if err != nil { + return nil, err + } + + if _, _, err := scan.NextObject(w); err != nil { + return nil, err + } + + return newDeltaObject(obj, hash, base, header.Length), nil +} + +func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { + for packfile, index := range s.index { + offset, err := index.FindOffset(h) + if err == nil { + return packfile, h, offset + } + } + + return plumbing.ZeroHash, plumbing.ZeroHash, -1 +} + +func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) { + hashes, err := s.dir.ObjectsWithPrefix(prefix) + if err != nil { + return nil, err + } + + // TODO: This could be faster with some idxfile changes, + // or diving into the packfile. + for _, index := range s.index { + ei, err := index.Entries() + if err != nil { + return nil, err + } + for { + e, err := ei.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + if bytes.HasPrefix(e.Hash[:], prefix) { + hashes = append(hashes, e.Hash) + } + } + ei.Close() + } + + return hashes, nil +} + +// IterEncodedObjects returns an iterator for all the objects in the packfile +// with the given type. 
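HashesWithPrefix above is the building block for short-hash resolution. A hypothetical helper, assuming a storage opened as in the earlier sketches; note the prefix is raw bytes, so only even-length hex abbreviations decode cleanly:

package sketch

import (
	"encoding/hex"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// resolveShort expands an abbreviated hash into the candidate full hashes.
func resolveShort(st *filesystem.Storage, abbrev string) ([]plumbing.Hash, error) {
	prefix, err := hex.DecodeString(abbrev) // e.g. "a94a8f" -> 3 raw bytes
	if err != nil {
		return nil, err
	}
	return st.HashesWithPrefix(prefix)
}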
+func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { + objects, err := s.dir.Objects() + if err != nil { + return nil, err + } + + seen := make(map[plumbing.Hash]struct{}) + var iters []storer.EncodedObjectIter + if len(objects) != 0 { + iters = append(iters, &objectsIter{s: s, t: t, h: objects}) + seen = hashListAsMap(objects) + } + + packi, err := s.buildPackfileIters(t, seen) + if err != nil { + return nil, err + } + + iters = append(iters, packi) + return storer.NewMultiEncodedObjectIter(iters), nil +} + +func (s *ObjectStorage) buildPackfileIters( + t plumbing.ObjectType, + seen map[plumbing.Hash]struct{}, +) (storer.EncodedObjectIter, error) { + if err := s.requireIndex(); err != nil { + return nil, err + } + + packs, err := s.dir.ObjectPacks() + if err != nil { + return nil, err + } + return &lazyPackfilesIter{ + hashes: packs, + open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) { + pack, err := s.dir.ObjectPack(h) + if err != nil { + return nil, err + } + return newPackfileIter( + s.dir.Fs(), pack, t, seen, s.index[h], + s.objectCache, s.options.KeepDescriptors, + ) + }, + }, nil +} + +// Close closes all opened files. +func (s *ObjectStorage) Close() error { + var firstError error + if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 { + for _, packfile := range s.packfiles { + err := packfile.Close() + if firstError == nil && err != nil { + firstError = err + } + } + } + + s.packfiles = nil + s.dir.Close() + + return firstError +} + +type lazyPackfilesIter struct { + hashes []plumbing.Hash + open func(h plumbing.Hash) (storer.EncodedObjectIter, error) + cur storer.EncodedObjectIter +} + +func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) { + for { + if it.cur == nil { + if len(it.hashes) == 0 { + return nil, io.EOF + } + h := it.hashes[0] + it.hashes = it.hashes[1:] + + sub, err := it.open(h) + if err == io.EOF { + continue + } else if err != nil { + return nil, err + } + it.cur = sub + } + ob, err := it.cur.Next() + if err == io.EOF { + it.cur.Close() + it.cur = nil + continue + } else if err != nil { + return nil, err + } + return ob, nil + } +} + +func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error { + return storer.ForEachIterator(it, cb) +} + +func (it *lazyPackfilesIter) Close() { + if it.cur != nil { + it.cur.Close() + it.cur = nil + } + it.hashes = nil +} + +type packfileIter struct { + pack billy.File + iter storer.EncodedObjectIter + seen map[plumbing.Hash]struct{} + + // tells whether the pack file should be left open after iteration or not + keepPack bool +} + +// NewPackfileIter returns a new EncodedObjectIter for the provided packfile +// and object type. Packfile and index file will be closed after they're +// used. If keepPack is true the packfile won't be closed after the iteration +// finished. 
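A small sketch of the iterator machinery just added, counting commit objects; loose objects are yielded first and the seen map keeps packed duplicates out:

package sketch

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// countCommits walks every commit object reachable through the storage.
func countCommits(st *filesystem.Storage) (int, error) {
	iter, err := st.IterEncodedObjects(plumbing.CommitObject)
	if err != nil {
		return 0, err
	}
	defer iter.Close()

	n := 0
	err = iter.ForEach(func(o plumbing.EncodedObject) error {
		n++
		return nil
	})
	return n, err
}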
+func NewPackfileIter( + fs billy.Filesystem, + f billy.File, + idxFile billy.File, + t plumbing.ObjectType, + keepPack bool, +) (storer.EncodedObjectIter, error) { + idx := idxfile.NewMemoryIndex() + if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { + return nil, err + } + + if err := idxFile.Close(); err != nil { + return nil, err + } + + seen := make(map[plumbing.Hash]struct{}) + return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) +} + +func newPackfileIter( + fs billy.Filesystem, + f billy.File, + t plumbing.ObjectType, + seen map[plumbing.Hash]struct{}, + index idxfile.Index, + cache cache.Object, + keepPack bool, +) (storer.EncodedObjectIter, error) { + var p *packfile.Packfile + if cache != nil { + p = packfile.NewPackfileWithCache(index, fs, f, cache) + } else { + p = packfile.NewPackfile(index, fs, f) + } + + iter, err := p.GetByType(t) + if err != nil { + return nil, err + } + + return &packfileIter{ + pack: f, + iter: iter, + seen: seen, + keepPack: keepPack, + }, nil +} + +func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { + for { + obj, err := iter.iter.Next() + if err != nil { + return nil, err + } + + if _, ok := iter.seen[obj.Hash()]; ok { + continue + } + + return obj, nil + } +} + +func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { + for { + o, err := iter.Next() + if err != nil { + if err == io.EOF { + iter.Close() + return nil + } + return err + } + + if err := cb(o); err != nil { + return err + } + } +} + +func (iter *packfileIter) Close() { + iter.iter.Close() + if !iter.keepPack { + _ = iter.pack.Close() + } +} + +type objectsIter struct { + s *ObjectStorage + t plumbing.ObjectType + h []plumbing.Hash +} + +func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { + if len(iter.h) == 0 { + return nil, io.EOF + } + + obj, err := iter.s.getFromUnpacked(iter.h[0]) + iter.h = iter.h[1:] + + if err != nil { + return nil, err + } + + if iter.t != plumbing.AnyObject && iter.t != obj.Type() { + return iter.Next() + } + + return obj, err +} + +func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { + for { + o, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := cb(o); err != nil { + return err + } + } +} + +func (iter *objectsIter) Close() { + iter.h = []plumbing.Hash{} +} + +func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} { + m := make(map[plumbing.Hash]struct{}, len(l)) + for _, h := range l { + m[h] = struct{}{} + } + return m +} + +func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { + err := s.dir.ForEachObjectHash(fun) + if err == storer.ErrStop { + return nil + } + return err +} + +func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { + fi, err := s.dir.ObjectStat(hash) + if err != nil { + return time.Time{}, err + } + return fi.ModTime(), nil +} + +func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error { + return s.dir.ObjectDelete(hash) +} + +func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { + return s.dir.ObjectPacks() +} + +func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error { + return s.dir.DeleteOldObjectPackAndIndex(h, t) +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go new file mode 100644 index 0000000000..aabcd7308d --- /dev/null +++ 
b/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go
@@ -0,0 +1,44 @@
+package filesystem
+
+import (
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
+)
+
+type ReferenceStorage struct {
+	dir *dotgit.DotGit
+}
+
+func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error {
+	return r.dir.SetRef(ref, nil)
+}
+
+func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
+	return r.dir.SetRef(ref, old)
+}
+
+func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
+	return r.dir.Ref(n)
+}
+
+func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
+	refs, err := r.dir.Refs()
+	if err != nil {
+		return nil, err
+	}
+
+	return storer.NewReferenceSliceIter(refs), nil
+}
+
+func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
+	return r.dir.RemoveRef(n)
+}
+
+func (r *ReferenceStorage) CountLooseRefs() (int, error) {
+	return r.dir.CountLooseRefs()
+}
+
+func (r *ReferenceStorage) PackRefs() error {
+	return r.dir.PackRefs()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go
new file mode 100644
index 0000000000..afb600cf2d
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go
@@ -0,0 +1,54 @@
+package filesystem
+
+import (
+	"bufio"
+	"fmt"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// ShallowStorage is where the shallow commits are stored; it is an internal
+// type used to manipulate the shallow file.
+type ShallowStorage struct {
+	dir *dotgit.DotGit
+}
+
+// SetShallow saves the shallows in the shallow file in the .git folder, one
+// commit per line, each represented by a 40-byte hexadecimal object name
+// terminated by a newline.
+func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
+	f, err := s.dir.ShallowWriter()
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+	for _, h := range commits {
+		if _, err := fmt.Fprintf(f, "%s\n", h); err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// Shallow returns the shallow commits, reading them from the shallow file in
+// the .git directory.
+func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
+	f, err := s.dir.Shallow()
+	if f == nil || err != nil {
+		return nil, err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	var hash []plumbing.Hash
+
+	scn := bufio.NewScanner(f)
+	for scn.Scan() {
+		hash = append(hash, plumbing.NewHash(scn.Text()))
+	}
+
+	return hash, scn.Err()
+}
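The reference store above supports optimistic updates via CheckAndSetReference. A sketch, assuming the storage.ErrReferenceHasChanged sentinel declared later in this diff and an illustrative branch name:

package sketch

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

// moveBranch performs a compare-and-swap style ref update: the write is
// rejected if the ref changed since we read it, instead of clobbering it.
func moveBranch(st *filesystem.Storage, target plumbing.Hash) error {
	name := plumbing.ReferenceName("refs/heads/main")
	old, err := st.Reference(name)
	if err != nil {
		return err
	}
	updated := plumbing.NewHashReference(name, target)
	// Returns storage.ErrReferenceHasChanged on a lost race; callers can
	// reload and retry.
	return st.CheckAndSetReference(updated, old)
}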
diff --git a/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go
new file mode 100644
index 0000000000..8b69b27b00
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go
@@ -0,0 +1,73 @@
+// Package filesystem is a storage backend based on filesystems.
+package filesystem
+
+import (
+	"github.com/go-git/go-git/v5/plumbing/cache"
+	"github.com/go-git/go-git/v5/storage/filesystem/dotgit"
+
+	"github.com/go-git/go-billy/v5"
+)
+
+// Storage is an implementation of git.Storer that stores data on disk in the
+// standard git format (that is, the .git directory). Zero values of this type
+// are not safe to use; see the NewStorage function below.
+type Storage struct {
+	fs  billy.Filesystem
+	dir *dotgit.DotGit
+
+	ObjectStorage
+	ReferenceStorage
+	IndexStorage
+	ShallowStorage
+	ConfigStorage
+	ModuleStorage
+}
+
+// Options holds configuration for the storage.
+type Options struct {
+	// ExclusiveAccess means that the filesystem is not modified externally
+	// while the repo is open.
+	ExclusiveAccess bool
+	// KeepDescriptors makes the file descriptors be reused, but they will
+	// need to be manually closed by calling Close().
+	KeepDescriptors bool
+	// MaxOpenDescriptors is the max number of file descriptors to keep
+	// open. If KeepDescriptors is true, all file descriptors will remain open.
+	MaxOpenDescriptors int
+}
+
+// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
+func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
+	return NewStorageWithOptions(fs, cache, Options{})
+}
+
+// NewStorageWithOptions returns a new Storage with extra options,
+// backed by a given `fs.Filesystem` and cache.
+func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
+	dirOps := dotgit.Options{
+		ExclusiveAccess: ops.ExclusiveAccess,
+	}
+	dir := dotgit.NewWithOptions(fs, dirOps)
+
+	return &Storage{
+		fs:  fs,
+		dir: dir,
+
+		ObjectStorage:    *NewObjectStorageWithOptions(dir, cache, ops),
+		ReferenceStorage: ReferenceStorage{dir: dir},
+		IndexStorage:     IndexStorage{dir: dir},
+		ShallowStorage:   ShallowStorage{dir: dir},
+		ConfigStorage:    ConfigStorage{dir: dir},
+		ModuleStorage:    ModuleStorage{dir: dir},
+	}
+}
+
+// Filesystem returns the underlying filesystem.
+func (s *Storage) Filesystem() billy.Filesystem {
+	return s.fs
+}
+
+// Init initializes the .git directory.
+func (s *Storage) Init() error {
+	return s.dir.Initialize()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go
new file mode 100644
index 0000000000..a8e56697b9
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go
@@ -0,0 +1,320 @@
+// Package memory is a storage backend based on memory.
+package memory
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-git/go-git/v5/config"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/index"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/storage"
+)
+
+var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")
+
+// Storage is an implementation of git.Storer that stores data in memory and
+// is therefore ephemeral. It should be used in controlled environments, since
+// the in-memory representation of a repository can fill the machine's memory.
+// On the other hand, this storage has the best performance.
+type Storage struct { + ConfigStorage + ObjectStorage + ShallowStorage + IndexStorage + ReferenceStorage + ModuleStorage +} + +// NewStorage returns a new Storage base on memory +func NewStorage() *Storage { + return &Storage{ + ReferenceStorage: make(ReferenceStorage), + ConfigStorage: ConfigStorage{}, + ShallowStorage: ShallowStorage{}, + ObjectStorage: ObjectStorage{ + Objects: make(map[plumbing.Hash]plumbing.EncodedObject), + Commits: make(map[plumbing.Hash]plumbing.EncodedObject), + Trees: make(map[plumbing.Hash]plumbing.EncodedObject), + Blobs: make(map[plumbing.Hash]plumbing.EncodedObject), + Tags: make(map[plumbing.Hash]plumbing.EncodedObject), + }, + ModuleStorage: make(ModuleStorage), + } +} + +type ConfigStorage struct { + config *config.Config +} + +func (c *ConfigStorage) SetConfig(cfg *config.Config) error { + if err := cfg.Validate(); err != nil { + return err + } + + c.config = cfg + return nil +} + +func (c *ConfigStorage) Config() (*config.Config, error) { + if c.config == nil { + c.config = config.NewConfig() + } + + return c.config, nil +} + +type IndexStorage struct { + index *index.Index +} + +func (c *IndexStorage) SetIndex(idx *index.Index) error { + c.index = idx + return nil +} + +func (c *IndexStorage) Index() (*index.Index, error) { + if c.index == nil { + c.index = &index.Index{Version: 2} + } + + return c.index, nil +} + +type ObjectStorage struct { + Objects map[plumbing.Hash]plumbing.EncodedObject + Commits map[plumbing.Hash]plumbing.EncodedObject + Trees map[plumbing.Hash]plumbing.EncodedObject + Blobs map[plumbing.Hash]plumbing.EncodedObject + Tags map[plumbing.Hash]plumbing.EncodedObject +} + +func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { + return &plumbing.MemoryObject{} +} + +func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { + h := obj.Hash() + o.Objects[h] = obj + + switch obj.Type() { + case plumbing.CommitObject: + o.Commits[h] = o.Objects[h] + case plumbing.TreeObject: + o.Trees[h] = o.Objects[h] + case plumbing.BlobObject: + o.Blobs[h] = o.Objects[h] + case plumbing.TagObject: + o.Tags[h] = o.Objects[h] + default: + return h, ErrUnsupportedObjectType + } + + return h, nil +} + +func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { + if _, ok := o.Objects[h]; !ok { + return plumbing.ErrObjectNotFound + } + return nil +} + +func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( + size int64, err error) { + obj, ok := o.Objects[h] + if !ok { + return 0, plumbing.ErrObjectNotFound + } + + return obj.Size(), nil +} + +func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { + obj, ok := o.Objects[h] + if !ok || (plumbing.AnyObject != t && obj.Type() != t) { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { + var series []plumbing.EncodedObject + switch t { + case plumbing.AnyObject: + series = flattenObjectMap(o.Objects) + case plumbing.CommitObject: + series = flattenObjectMap(o.Commits) + case plumbing.TreeObject: + series = flattenObjectMap(o.Trees) + case plumbing.BlobObject: + series = flattenObjectMap(o.Blobs) + case plumbing.TagObject: + series = flattenObjectMap(o.Tags) + } + + return storer.NewEncodedObjectSliceIter(series), nil +} + +func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject { + objects := make([]plumbing.EncodedObject, 0, len(m)) 
+ for _, obj := range m { + objects = append(objects, obj) + } + return objects +} + +func (o *ObjectStorage) Begin() storer.Transaction { + return &TxObjectStorage{ + Storage: o, + Objects: make(map[plumbing.Hash]plumbing.EncodedObject), + } +} + +func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { + for h := range o.Objects { + err := fun(h) + if err != nil { + if err == storer.ErrStop { + return nil + } + return err + } + } + return nil +} + +func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { + return nil, nil +} +func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error { + return nil +} + +var errNotSupported = fmt.Errorf("Not supported") + +func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { + return time.Time{}, errNotSupported +} +func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error { + return errNotSupported +} + +type TxObjectStorage struct { + Storage *ObjectStorage + Objects map[plumbing.Hash]plumbing.EncodedObject +} + +func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { + h := obj.Hash() + tx.Objects[h] = obj + + return h, nil +} + +func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { + obj, ok := tx.Objects[h] + if !ok || (plumbing.AnyObject != t && obj.Type() != t) { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +func (tx *TxObjectStorage) Commit() error { + for h, obj := range tx.Objects { + delete(tx.Objects, h) + if _, err := tx.Storage.SetEncodedObject(obj); err != nil { + return err + } + } + + return nil +} + +func (tx *TxObjectStorage) Rollback() error { + tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject) + return nil +} + +type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference + +func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error { + if ref != nil { + r[ref.Name()] = ref + } + + return nil +} + +func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { + if ref == nil { + return nil + } + + if old != nil { + tmp := r[ref.Name()] + if tmp != nil && tmp.Hash() != old.Hash() { + return storage.ErrReferenceHasChanged + } + } + r[ref.Name()] = ref + return nil +} + +func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { + ref, ok := r[n] + if !ok { + return nil, plumbing.ErrReferenceNotFound + } + + return ref, nil +} + +func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { + var refs []*plumbing.Reference + for _, ref := range r { + refs = append(refs, ref) + } + + return storer.NewReferenceSliceIter(refs), nil +} + +func (r ReferenceStorage) CountLooseRefs() (int, error) { + return len(r), nil +} + +func (r ReferenceStorage) PackRefs() error { + return nil +} + +func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { + delete(r, n) + return nil +} + +type ShallowStorage []plumbing.Hash + +func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { + *s = commits + return nil +} + +func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) { + return s, nil +} + +type ModuleStorage map[string]*Storage + +func (s ModuleStorage) Module(name string) (storage.Storer, error) { + if m, ok := s[name]; ok { + return m, nil + } + + m := NewStorage() + s[name] = m + + return m, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/storage/storer.go 
b/vendor/github.com/go-git/go-git/v5/storage/storer.go
new file mode 100644
index 0000000000..4800ac7ba0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/storage/storer.go
@@ -0,0 +1,30 @@
+package storage
+
+import (
+	"errors"
+
+	"github.com/go-git/go-git/v5/config"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+var ErrReferenceHasChanged = errors.New("reference has changed concurrently")
+
+// Storer is a generic storage of objects, references and any information
+// related to a particular repository. The package github.com/go-git/go-git/v5/storage
+// contains two implementations: a filesystem-based implementation (such as
+// `.git`) and an in-memory implementation, which is ephemeral.
+type Storer interface {
+	storer.EncodedObjectStorer
+	storer.ReferenceStorer
+	storer.ShallowStorer
+	storer.IndexStorer
+	config.ConfigStorer
+	ModuleStorer
+}
+
+// ModuleStorer allows interacting with the modules' Storers.
+type ModuleStorer interface {
+	// Module returns a Storer representing a submodule; if the submodule
+	// does not exist, a new empty Storer is returned.
+	Module(name string) (Storer, error)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/submodule.go b/vendor/github.com/go-git/go-git/v5/submodule.go
new file mode 100644
index 0000000000..dff26b0d80
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/submodule.go
@@ -0,0 +1,357 @@
+package git
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-git/v5/config"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/index"
+)
+
+var (
+	ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized")
+	ErrSubmoduleNotInitialized     = errors.New("submodule not initialized")
+)
+
+// Submodule allows you to keep another Git repository in a subdirectory of
+// your repository.
+type Submodule struct {
+	// initialized defines if a submodule was already initialized.
+	initialized bool
+
+	c *config.Submodule
+	w *Worktree
+}
+
+// Config returns the submodule config.
+func (s *Submodule) Config() *config.Submodule {
+	return s.c
+}
+
+// Init initializes the submodule, reading the recorded Entry in the index
+// for the given submodule.
+func (s *Submodule) Init() error {
+	cfg, err := s.w.r.Config()
+	if err != nil {
+		return err
+	}
+
+	_, ok := cfg.Submodules[s.c.Name]
+	if ok {
+		return ErrSubmoduleAlreadyInitialized
+	}
+
+	s.initialized = true
+
+	cfg.Submodules[s.c.Name] = s.c
+	return s.w.r.Storer.SetConfig(cfg)
+}
+
+// Status returns the status of the submodule.
+func (s *Submodule) Status() (*SubmoduleStatus, error) { + idx, err := s.w.r.Storer.Index() + if err != nil { + return nil, err + } + + return s.status(idx) +} + +func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) { + status := &SubmoduleStatus{ + Path: s.c.Path, + } + + e, err := idx.Entry(s.c.Path) + if err != nil && err != index.ErrEntryNotFound { + return nil, err + } + + if e != nil { + status.Expected = e.Hash + } + + if !s.initialized { + return status, nil + } + + r, err := s.Repository() + if err != nil { + return nil, err + } + + head, err := r.Head() + if err == nil { + status.Current = head.Hash() + } + + if err != nil && err == plumbing.ErrReferenceNotFound { + err = nil + } + + return status, err +} + +// Repository returns the Repository represented by this submodule +func (s *Submodule) Repository() (*Repository, error) { + if !s.initialized { + return nil, ErrSubmoduleNotInitialized + } + + storer, err := s.w.r.Storer.Module(s.c.Name) + if err != nil { + return nil, err + } + + _, err = storer.Reference(plumbing.HEAD) + if err != nil && err != plumbing.ErrReferenceNotFound { + return nil, err + } + + var exists bool + if err == nil { + exists = true + } + + var worktree billy.Filesystem + if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil { + return nil, err + } + + if exists { + return Open(storer, worktree) + } + + r, err := Init(storer, worktree) + if err != nil { + return nil, err + } + + _, err = r.CreateRemote(&config.RemoteConfig{ + Name: DefaultRemoteName, + URLs: []string{s.c.URL}, + }) + + return r, err +} + +// Update the registered submodule to match what the superproject expects, the +// submodule should be initialized first calling the Init method or setting in +// the options SubmoduleUpdateOptions.Init equals true +func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { + return s.UpdateContext(context.Background(), o) +} + +// UpdateContext the registered submodule to match what the superproject +// expects, the submodule should be initialized first calling the Init method or +// setting in the options SubmoduleUpdateOptions.Init equals true. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. 
+func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { + return s.update(ctx, o, plumbing.ZeroHash) +} + +func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error { + if !s.initialized && !o.Init { + return ErrSubmoduleNotInitialized + } + + if !s.initialized && o.Init { + if err := s.Init(); err != nil { + return err + } + } + + idx, err := s.w.r.Storer.Index() + if err != nil { + return err + } + + hash := forceHash + if hash.IsZero() { + e, err := idx.Entry(s.c.Path) + if err != nil { + return err + } + + hash = e.Hash + } + + r, err := s.Repository() + if err != nil { + return err + } + + if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil { + return err + } + + return s.doRecursiveUpdate(r, o) +} + +func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error { + if o.RecurseSubmodules == NoRecurseSubmodules { + return nil + } + + w, err := r.Worktree() + if err != nil { + return err + } + + l, err := w.Submodules() + if err != nil { + return err + } + + new := &SubmoduleUpdateOptions{} + *new = *o + + new.RecurseSubmodules-- + return l.Update(new) +} + +func (s *Submodule) fetchAndCheckout( + ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash, +) error { + if !o.NoFetch { + err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth}) + if err != nil && err != NoErrAlreadyUpToDate { + return err + } + } + + w, err := r.Worktree() + if err != nil { + return err + } + + if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil { + return err + } + + head := plumbing.NewHashReference(plumbing.HEAD, hash) + return r.Storer.SetReference(head) +} + +// Submodules list of several submodules from the same repository. +type Submodules []*Submodule + +// Init initializes the submodules in this list. +func (s Submodules) Init() error { + for _, sub := range s { + if err := sub.Init(); err != nil { + return err + } + } + + return nil +} + +// Update updates all the submodules in this list. +func (s Submodules) Update(o *SubmoduleUpdateOptions) error { + return s.UpdateContext(context.Background(), o) +} + +// UpdateContext updates all the submodules in this list. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { + for _, sub := range s { + if err := sub.UpdateContext(ctx, o); err != nil { + return err + } + } + + return nil +} + +// Status returns the status of the submodules. 
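Putting the update plumbing together, a sketch for a caller with an already-open *git.Repository; it leans on the Submodules.Status method defined just below, and the deadline only bounds the transport operations, as documented above:

package sketch

import (
	"context"
	"fmt"
	"time"

	git "github.com/go-git/go-git/v5"
)

// updateAll initializes and updates every submodule, then prints
// `git submodule status`-style output.
func updateAll(repo *git.Repository) error {
	w, err := repo.Worktree()
	if err != nil {
		return err
	}
	subs, err := w.Submodules()
	if err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	if err := subs.UpdateContext(ctx, &git.SubmoduleUpdateOptions{Init: true}); err != nil {
		return err
	}

	status, err := subs.Status()
	if err != nil {
		return err
	}
	fmt.Print(status)
	return nil
}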
+func (s Submodules) Status() (SubmodulesStatus, error) {
+	var list SubmodulesStatus
+
+	var r *Repository
+	for _, sub := range s {
+		if r == nil {
+			r = sub.w.r
+		}
+
+		idx, err := r.Storer.Index()
+		if err != nil {
+			return nil, err
+		}
+
+		status, err := sub.status(idx)
+		if err != nil {
+			return nil, err
+		}
+
+		list = append(list, status)
+	}
+
+	return list, nil
+}
+
+// SubmodulesStatus contains the status for all submodules in the worktree.
+type SubmodulesStatus []*SubmoduleStatus
+
+// String is equivalent to `git submodule status`.
+func (s SubmodulesStatus) String() string {
+	buf := bytes.NewBuffer(nil)
+	for _, sub := range s {
+		fmt.Fprintln(buf, sub)
+	}
+
+	return buf.String()
+}
+
+// SubmoduleStatus contains the status for a submodule in the worktree.
+type SubmoduleStatus struct {
+	Path     string
+	Current  plumbing.Hash
+	Expected plumbing.Hash
+	Branch   plumbing.ReferenceName
+}
+
+// IsClean reports whether the HEAD of the submodule is equal to the
+// expected commit.
+func (s *SubmoduleStatus) IsClean() bool {
+	return s.Current == s.Expected
+}
+
+// String is equivalent to `git submodule status <submodule>`.
+//
+// This will print the SHA-1 of the currently checked out commit for a
+// submodule, along with the submodule path and the output of git describe for
+// the SHA-1. Each SHA-1 will be prefixed with - if the submodule is not
+// initialized, and + if the currently checked out submodule commit does not
+// match the SHA-1 found in the index of the containing repository.
+func (s *SubmoduleStatus) String() string {
+	var extra string
+	var status = ' '
+
+	if s.Current.IsZero() {
+		status = '-'
+	} else if !s.IsClean() {
+		status = '+'
+	}
+
+	if len(s.Branch) != 0 {
+		extra = string(s.Branch[5:])
+	} else if !s.Current.IsZero() {
+		extra = s.Current.String()[:7]
+	}
+
+	if extra != "" {
+		extra = fmt.Sprintf(" (%s)", extra)
+	}
+
+	return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/binary/read.go b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go
new file mode 100644
index 0000000000..a14d48db9c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/utils/binary/read.go
@@ -0,0 +1,180 @@
+// Package binary implements syntax-sugar functions on top of the standard
+// library binary package.
+package binary
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+// Read reads structured binary data from r into data. Bytes are read and
+// decoded in BigEndian order
+// https://golang.org/pkg/encoding/binary/#Read
+func Read(r io.Reader, data ...interface{}) error {
+	for _, v := range data {
+		if err := binary.Read(r, binary.BigEndian, v); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ReadUntil reads from r until delim is found.
+func ReadUntil(r io.Reader, delim byte) ([]byte, error) {
+	if bufr, ok := r.(*bufio.Reader); ok {
+		return ReadUntilFromBufioReader(bufr, delim)
+	}
+
+	var buf [1]byte
+	value := make([]byte, 0, 16)
+	for {
+		if _, err := io.ReadFull(r, buf[:]); err != nil {
+			return nil, err
+		}
+
+		if buf[0] == delim {
+			return value, nil
+		}
+
+		value = append(value, buf[0])
+	}
+}
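ReadUntil is the piece git-style parsers use for NUL-delimited framing (for example the "<type> <size>\0" loose-object header). A sketch with an illustrative helper name:

package sketch

import (
	"io"

	"github.com/go-git/go-git/v5/utils/binary"
)

// readField consumes bytes up to, but not including, the NUL delimiter.
func readField(r io.Reader) (string, error) {
	field, err := binary.ReadUntil(r, 0)
	if err != nil {
		return "", err
	}
	return string(field), nil
}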
+// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter
+// from the result.
+func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
+	value, err := r.ReadBytes(delim)
+	if err != nil || len(value) == 0 {
+		return nil, err
+	}
+
+	return value[:len(value)-1], nil
+}
+
+// ReadVariableWidthInt reads and returns an int in Git VLQ special format:
+//
+// Ordinary VLQ has some redundancies, example: the number 358 can be
+// encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the
+// 4-octet VLQ 0x80808166 and so forth.
+//
+// To avoid these redundancies, the VLQ format used in Git removes this
+// prepending redundancy and extends the representable range of shorter
+// VLQs by adding an offset to VLQs of 2 or more octets in such a way
+// that the lowest possible value for such an (N+1)-octet VLQ becomes
+// exactly one more than the maximum possible value for an N-octet VLQ.
+// In particular, since a 1-octet VLQ can store a maximum value of 127,
+// the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of
+// 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is
+// 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ
+// (0x808000) has a value of 16512 instead of zero, which means
+// that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of
+// just 2097151. And so forth.
+//
+// This is how the offset is saved in C:
+//
+//     dheader[pos] = ofs & 127;
+//     while (ofs >>= 7)
+//         dheader[--pos] = 128 | (--ofs & 127);
+//
+func ReadVariableWidthInt(r io.Reader) (int64, error) {
+	var c byte
+	if err := Read(r, &c); err != nil {
+		return 0, err
+	}
+
+	var v = int64(c & maskLength)
+	for c&maskContinue > 0 {
+		v++
+		if err := Read(r, &c); err != nil {
+			return 0, err
+		}
+
+		v = (v << lengthBits) + int64(c&maskLength)
+	}
+
+	return v, nil
+}
+
+const (
+	maskContinue = uint8(128) // 1000 0000
+	maskLength   = uint8(127) // 0111 1111
+	lengthBits   = uint8(7)   // subsequent bytes have 7 bits to store the length
+)
+
+// ReadUint64 reads 8 bytes and returns them as a BigEndian uint64.
+func ReadUint64(r io.Reader) (uint64, error) {
+	var v uint64
+	if err := binary.Read(r, binary.BigEndian, &v); err != nil {
+		return 0, err
+	}
+
+	return v, nil
+}
+
+// ReadUint32 reads 4 bytes and returns them as a BigEndian uint32.
+func ReadUint32(r io.Reader) (uint32, error) {
+	var v uint32
+	if err := binary.Read(r, binary.BigEndian, &v); err != nil {
+		return 0, err
+	}
+
+	return v, nil
+}
+
+// ReadUint16 reads 2 bytes and returns them as a BigEndian uint16.
+func ReadUint16(r io.Reader) (uint16, error) {
+	var v uint16
+	if err := binary.Read(r, binary.BigEndian, &v); err != nil {
+		return 0, err
+	}
+
+	return v, nil
+}
+
+// ReadHash reads a plumbing.Hash from r.
+func ReadHash(r io.Reader) (plumbing.Hash, error) {
+	var h plumbing.Hash
+	if err := binary.Read(r, binary.BigEndian, h[:]); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return h, nil
+}
+
+const sniffLen = 8000
+
+// IsBinary detects if data is a binary value based on:
+// http://git.kernel.org/cgit/git/git.git/tree/xdiff-interface.c?id=HEAD#n198
+func IsBinary(r io.Reader) (bool, error) {
+	reader := bufio.NewReader(r)
+	c := 0
+	for {
+		if c == sniffLen {
+			break
+		}
+
+		b, err := reader.ReadByte()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return false, err
+		}
+
+		if b == byte(0) {
+			return true, nil
+		}
+
+		c++
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/binary/write.go b/vendor/github.com/go-git/go-git/v5/utils/binary/write.go
new file mode 100644
index 0000000000..c08c73a06b
---
/dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/binary/write.go @@ -0,0 +1,50 @@ +package binary + +import ( + "encoding/binary" + "io" +) + +// Write writes the binary representation of data into w, using BigEndian order +// https://golang.org/pkg/encoding/binary/#Write +func Write(w io.Writer, data ...interface{}) error { + for _, v := range data { + if err := binary.Write(w, binary.BigEndian, v); err != nil { + return err + } + } + + return nil +} + +func WriteVariableWidthInt(w io.Writer, n int64) error { + buf := []byte{byte(n & 0x7f)} + n >>= 7 + for n != 0 { + n-- + buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...) + n >>= 7 + } + + _, err := w.Write(buf) + + return err +} + +// WriteUint64 writes the binary representation of a uint64 into w, in BigEndian +// order +func WriteUint64(w io.Writer, value uint64) error { + return binary.Write(w, binary.BigEndian, value) +} + +// WriteUint32 writes the binary representation of a uint32 into w, in BigEndian +// order +func WriteUint32(w io.Writer, value uint32) error { + return binary.Write(w, binary.BigEndian, value) +} + +// WriteUint16 writes the binary representation of a uint16 into w, in BigEndian +// order +func WriteUint16(w io.Writer, value uint16) error { + return binary.Write(w, binary.BigEndian, value) +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go b/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go new file mode 100644 index 0000000000..70054949fd --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go @@ -0,0 +1,61 @@ +// Package diff implements line oriented diffs, similar to the ancient +// Unix diff command. +// +// The current implementation is just a wrapper around Sergi's +// go-diff/diffmatchpatch library, which is a go port of Neil +// Fraser's google-diff-match-patch code +package diff + +import ( + "bytes" + "time" + + "github.com/sergi/go-diff/diffmatchpatch" +) + +// Do computes the (line oriented) modifications needed to turn the src +// string into the dst string. The underlying algorithm is Meyers, +// its complexity is O(N*d) where N is min(lines(src), lines(dst)) and d +// is the size of the diff. +func Do(src, dst string) (diffs []diffmatchpatch.Diff) { + // the default timeout is time.Second which may be too small under heavy load + return DoWithTimeout(src, dst, time.Hour) +} + +// DoWithTimeout computes the (line oriented) modifications needed to turn the src +// string into the dst string. The `timeout` argument specifies the maximum +// amount of time it is allowed to spend in this function. If the timeout +// is exceeded, the parts of the strings which were not considered are turned into +// a bulk delete+insert and the half-baked suboptimal result is returned at once. +// The underlying algorithm is Meyers, its complexity is O(N*d) where N is +// min(lines(src), lines(dst)) and d is the size of the diff. +func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) { + dmp := diffmatchpatch.New() + dmp.DiffTimeout = timeout + wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst) + diffs = dmp.DiffMainRunes(wSrc, wDst, false) + diffs = dmp.DiffCharsToLines(diffs, warray) + return diffs +} + +// Dst computes and returns the destination text. 
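A sketch of driving the Do helper above; the diffmatchpatch constants are the ones this file already imports:

package sketch

import (
	"fmt"

	"github.com/go-git/go-git/v5/utils/diff"
	"github.com/sergi/go-diff/diffmatchpatch"
)

// show prints each diff chunk with a +/-/space marker, one chunk per line.
func show(src, dst string) {
	for _, d := range diff.Do(src, dst) {
		switch d.Type {
		case diffmatchpatch.DiffInsert:
			fmt.Printf("+%q\n", d.Text)
		case diffmatchpatch.DiffDelete:
			fmt.Printf("-%q\n", d.Text)
		default: // diffmatchpatch.DiffEqual
			fmt.Printf(" %q\n", d.Text)
		}
	}
}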
+func Dst(diffs []diffmatchpatch.Diff) string { + var text bytes.Buffer + for _, d := range diffs { + if d.Type != diffmatchpatch.DiffDelete { + text.WriteString(d.Text) + } + } + return text.String() +} + +// Src computes and returns the source text +func Src(diffs []diffmatchpatch.Diff) string { + var text bytes.Buffer + for _, d := range diffs { + if d.Type != diffmatchpatch.DiffInsert { + text.WriteString(d.Text) + } + } + return text.String() +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go new file mode 100644 index 0000000000..e9dcbfe49b --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go @@ -0,0 +1,170 @@ +// Package ioutil implements some I/O utility functions. +package ioutil + +import ( + "bufio" + "context" + "errors" + "io" + + "github.com/jbenet/go-context/io" +) + +type readPeeker interface { + io.Reader + Peek(int) ([]byte, error) +} + +var ( + ErrEmptyReader = errors.New("reader is empty") +) + +// NonEmptyReader takes a reader and returns it if it is not empty, or +// `ErrEmptyReader` if it is empty. If there is an error when reading the first +// byte of the given reader, it will be propagated. +func NonEmptyReader(r io.Reader) (io.Reader, error) { + pr, ok := r.(readPeeker) + if !ok { + pr = bufio.NewReader(r) + } + + _, err := pr.Peek(1) + if err == io.EOF { + return nil, ErrEmptyReader + } + + if err != nil { + return nil, err + } + + return pr, nil +} + +type readCloser struct { + io.Reader + closer io.Closer +} + +func (r *readCloser) Close() error { + return r.closer.Close() +} + +// NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and +// `io.Closer`. +func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser { + return &readCloser{Reader: r, closer: c} +} + +type writeCloser struct { + io.Writer + closer io.Closer +} + +func (r *writeCloser) Close() error { + return r.closer.Close() +} + +// NewWriteCloser creates an `io.WriteCloser` with the given `io.Writer` and +// `io.Closer`. +func NewWriteCloser(w io.Writer, c io.Closer) io.WriteCloser { + return &writeCloser{Writer: w, closer: c} +} + +type writeNopCloser struct { + io.Writer +} + +func (writeNopCloser) Close() error { return nil } + +// WriteNopCloser returns a WriteCloser with a no-op Close method wrapping +// the provided Writer w. +func WriteNopCloser(w io.Writer) io.WriteCloser { + return writeNopCloser{w} +} + +// CheckClose calls Close on the given io.Closer. If the given *error points to +// nil, it will be assigned the error returned by Close. Otherwise, any error +// returned by Close will be ignored. CheckClose is usually called with defer. +func CheckClose(c io.Closer, err *error) { + if cerr := c.Close(); cerr != nil && *err == nil { + *err = cerr + } +} + +// NewContextWriter wraps a writer to make it respect given Context. +// If there is a blocking write, the returned Writer will return whenever the +// context is cancelled (the return values are n=0 and err=ctx.Err()). +func NewContextWriter(ctx context.Context, w io.Writer) io.Writer { + return ctxio.NewWriter(ctx, w) +} + +// NewContextReader wraps a reader to make it respect given Context. +// If there is a blocking read, the returned Reader will return whenever the +// context is cancelled (the return values are n=0 and err=ctx.Err()). 
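CheckClose only helps when the caller uses a named error return, which is easy to miss. A sketch of the intended idiom, with an illustrative writeFile helper:

package sketch

import (
	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-git/v5/utils/ioutil"
)

// writeFile returns the Write error if any, otherwise the Close error.
func writeFile(fs billy.Filesystem, name string, data []byte) (err error) {
	f, err := fs.Create(name)
	if err != nil {
		return err
	}
	// The named return is what lets the deferred Close error reach the
	// caller; CheckClose only assigns when err is still nil.
	defer ioutil.CheckClose(f, &err)

	_, err = f.Write(data)
	return err
}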
+func NewContextReader(ctx context.Context, r io.Reader) io.Reader { + return ctxio.NewReader(ctx, r) +} + +// NewContextWriteCloser as NewContextWriter but with io.Closer interface. +func NewContextWriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser { + ctxw := ctxio.NewWriter(ctx, w) + return NewWriteCloser(ctxw, w) +} + +// NewContextReadCloser as NewContextReader but with io.Closer interface. +func NewContextReadCloser(ctx context.Context, r io.ReadCloser) io.ReadCloser { + ctxr := ctxio.NewReader(ctx, r) + return NewReadCloser(ctxr, r) +} + +type readerOnError struct { + io.Reader + notify func(error) +} + +// NewReaderOnError returns a io.Reader that call the notify function when an +// unexpected (!io.EOF) error happens, after call Read function. +func NewReaderOnError(r io.Reader, notify func(error)) io.Reader { + return &readerOnError{r, notify} +} + +// NewReadCloserOnError returns a io.ReadCloser that call the notify function +// when an unexpected (!io.EOF) error happens, after call Read function. +func NewReadCloserOnError(r io.ReadCloser, notify func(error)) io.ReadCloser { + return NewReadCloser(NewReaderOnError(r, notify), r) +} + +func (r *readerOnError) Read(buf []byte) (n int, err error) { + n, err = r.Reader.Read(buf) + if err != nil && err != io.EOF { + r.notify(err) + } + + return +} + +type writerOnError struct { + io.Writer + notify func(error) +} + +// NewWriterOnError returns a io.Writer that call the notify function when an +// unexpected (!io.EOF) error happens, after call Write function. +func NewWriterOnError(w io.Writer, notify func(error)) io.Writer { + return &writerOnError{w, notify} +} + +// NewWriteCloserOnError returns a io.WriteCloser that call the notify function +//when an unexpected (!io.EOF) error happens, after call Write function. +func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser { + return NewWriteCloser(NewWriterOnError(w, notify), w) +} + +func (r *writerOnError) Write(p []byte) (n int, err error) { + n, err = r.Writer.Write(p) + if err != nil && err != io.EOF { + r.notify(err) + } + + return +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go new file mode 100644 index 0000000000..cc6dc89071 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go @@ -0,0 +1,149 @@ +package merkletrie + +import ( + "fmt" + "io" + + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// Action values represent the kind of things a Change can represent: +// insertion, deletions or modifications of files. +type Action int + +// The set of possible actions in a change. +const ( + _ Action = iota + Insert + Delete + Modify +) + +// String returns the action as a human readable text. +func (a Action) String() string { + switch a { + case Insert: + return "Insert" + case Delete: + return "Delete" + case Modify: + return "Modify" + default: + panic(fmt.Sprintf("unsupported action: %d", a)) + } +} + +// A Change value represent how a noder has change between to merkletries. +type Change struct { + // The noder before the change or nil if it was inserted. + From noder.Path + // The noder after the change or nil if it was deleted. + To noder.Path +} + +// Action is convenience method that returns what Action c represents. 
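A sketch of consuming changes with the Action helper just described; the changes value would come from a DiffTree-style comparison of two merkletries:

package sketch

import (
	"fmt"

	"github.com/go-git/go-git/v5/utils/merkletrie"
)

// summarize prints each change, e.g. "Insert <Insert a/b/c.txt>".
func summarize(changes merkletrie.Changes) {
	for _, c := range changes {
		a, err := c.Action()
		if err != nil {
			panic(err) // malformed: both From and To were nil
		}
		fmt.Println(a, c)
	}
}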
+func (c *Change) Action() (Action, error) {
+	if c.From == nil && c.To == nil {
+		return Action(0), fmt.Errorf("malformed change: nil from and to")
+	}
+	if c.From == nil {
+		return Insert, nil
+	}
+	if c.To == nil {
+		return Delete, nil
+	}
+
+	return Modify, nil
+}
+
+// NewInsert returns a new Change representing the insertion of n.
+func NewInsert(n noder.Path) Change { return Change{To: n} }
+
+// NewDelete returns a new Change representing the deletion of n.
+func NewDelete(n noder.Path) Change { return Change{From: n} }
+
+// NewModify returns a new Change representing that a has been modified and
+// it is now b.
+func NewModify(a, b noder.Path) Change {
+	return Change{
+		From: a,
+		To:   b,
+	}
+}
+
+// String returns a single change in human readable form, using the
+// format: '<' + action + space + path + '>'. The contents of the file
+// before or after the change are not included in this format.
+//
+// Example: inserting a file at the path a/b/c.txt will return
+// "<Insert a/b/c.txt>".
+func (c Change) String() string {
+	action, err := c.Action()
+	if err != nil {
+		panic(err)
+	}
+
+	var path string
+	if action == Delete {
+		path = c.From.String()
+	} else {
+		path = c.To.String()
+	}
+
+	return fmt.Sprintf("<%s %s>", action, path)
+}
+
+// Changes is a list of changes between two merkletries.
+type Changes []Change
+
+// NewChanges returns an empty list of changes.
+func NewChanges() Changes {
+	return Changes{}
+}
+
+// Add adds the change c to the list of changes.
+func (l *Changes) Add(c Change) {
+	*l = append(*l, c)
+}
+
+// AddRecursiveInsert adds the required changes to insert all the
+// file-like noders found in root, recursively.
+func (l *Changes) AddRecursiveInsert(root noder.Path) error {
+	return l.addRecursive(root, NewInsert)
+}
+
+// AddRecursiveDelete adds the required changes to delete all the
+// file-like noders found in root, recursively.
+func (l *Changes) AddRecursiveDelete(root noder.Path) error {
+	return l.addRecursive(root, NewDelete)
+}
+
+type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete
+
+func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error {
+	if !root.IsDir() {
+		l.Add(ctor(root))
+		return nil
+	}
+
+	i, err := NewIterFromPath(root)
+	if err != nil {
+		return err
+	}
+
+	var current noder.Path
+	for {
+		if current, err = i.Step(); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		if current.IsDir() {
+			continue
+		}
+		l.Add(ctor(current))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go
new file mode 100644
index 0000000000..bd084b2ab0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go
@@ -0,0 +1,428 @@
+package merkletrie
+
+// The focus of this difftree implementation is to save time by
+// skipping whole directories if their hash is the same in both
+// trees.
+//
+// The diff algorithm implemented here is based on the doubleiter
+// type defined in this same package; we will iterate over both
+// trees at the same time, while comparing the current noders in
+// each iterator. Depending on how they differ we will output the
+// corresponding changes and move the iterators further over both
+// trees.
+//
+// The table below shows all the possible comparison results, along
+// with the changes we should produce and how to advance the
+// iterators.
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go
new file mode 100644
index 0000000000..bd084b2ab0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go
@@ -0,0 +1,428 @@
+package merkletrie
+
+// The focus of this difftree implementation is to save time by
+// skipping whole directories if their hash is the same in both
+// trees.
+//
+// The diff algorithm implemented here is based on the doubleiter
+// type defined in this same package; we will iterate over both
+// trees at the same time, while comparing the current noders in
+// each iterator. Depending on how they differ we will output the
+// corresponding changes and move the iterators further over both
+// trees.
+//
+// The table below shows all the possible comparison results, along
+// with the changes we should produce and how to advance the
+// iterators.
+//
+// The table is implemented by the switches in the functions
+// diffNodes, diffNodesSameName and diffDirs.
+//
+// Many Bothans died to bring us this information; make sure you
+// understand the table before modifying this code.
+
+// # Cases
+//
+// When comparing noders in both trees you will find yourself in
+// one of 169 possible cases, but if we ignore moves, we can
+// greatly simplify the search space into the following table:
+//
+// - "-": nothing, no file or directory
+// - a<>: an empty file named "a".
+// - a<1>: a file named "a", with "1" as its contents.
+// - a<2>: a file named "a", with "2" as its contents.
+// - a(): an empty dir named "a".
+// - a(...): a dir named "a", with some files and/or dirs inside (possibly
+//   empty).
+// - a(;;;): a dir named "a", with some other files and/or dirs inside
+//   (possibly empty), which are different from the ones in "a(...)".
+//
+//          \ to    -    a<>  a<1>  a<2>  a()  a(...)  a(;;;)
+//     from \
+//     -            00   01   02    03    04   05      06
+//     a<>          10   11   12    13    14   15      16
+//     a<1>         20   21   22    23    24   25      26
+//     a<2>         30   31   32    33    34   35      36
+//     a()          40   41   42    43    44   45      46
+//     a(...)       50   51   52    53    54   55      56
+//     a(;;;)       60   61   62    63    64   65      66
+//
+// Every (from, to) combination in the table is a special case, but
+// some of them can be merged into some more general cases, for
+// instance 11 and 22 can be merged into the general case: both
+// noders are equal.
+//
+// Here is a full list of all the cases that are similar and how to
+// merge them together into more general cases. Each general case
+// is labeled with an uppercase letter for further reference, and it
+// is followed by the pseudocode of the checks you have to perform
+// on both noders to see if you are in such a case, the actions to
+// perform (i.e. what changes to output) and how to advance the
+// iterators of each tree to continue the comparison process.
+//
+// ## A. Impossible: 00
+//
+// ## B. Same thing on both sides: 11, 22, 33, 44, 55, 66
+//   - check: `SameName() && SameHash()`
+//   - action: do nothing.
+//   - advance: `FromNext(); ToNext()`
+//
+// ### C. To was created: 01, 02, 03, 04, 05, 06
+//   - check: `DifferentName() && ToBeforeFrom()`
+//   - action: insertRecursively(to)
+//   - advance: `ToNext()`
+//
+// ### D. From was deleted: 10, 20, 30, 40, 50, 60
+//   - check: `DifferentName() && FromBeforeTo()`
+//   - action: `DeleteRecursively(from)`
+//   - advance: `FromNext()`
+//
+// ### E. Empty file to file with contents: 12, 13
+//   - check: `SameName() && DifferentHash() && FromIsFile() &&
+//     ToIsFile() && FromIsEmpty()`
+//   - action: `modifyFile(from, to)`
+//   - advance: `FromNext()` or `FromStep()`
+//
+// ### E'. file with contents to empty file: 21, 31
+//   - check: `SameName() && DifferentHash() && FromIsFile() &&
+//     ToIsFile() && ToIsEmpty()`
+//   - action: `modifyFile(from, to)`
+//   - advance: `FromNext()` or `FromStep()`
+//
+// ### F. empty file to empty dir with the same name: 14
+//   - check: `SameName() && FromIsFile() && FromIsEmpty() &&
+//     ToIsDir() && ToIsEmpty()`
+//   - action: `DeleteFile(from); InsertEmptyDir(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### F'. empty dir to empty file of the same name: 41
+//   - check: `SameName() && FromIsDir() && FromIsEmpty() &&
+//     ToIsFile() && ToIsEmpty()`
+//   - action: `DeleteEmptyDir(from); InsertFile(to)`
+//   - advance: `FromNext(); ToNext()` or step for any of them.
+//
+// ### G. empty file to non-empty dir of the same name: 15, 16
+//   - check: `SameName() && FromIsFile() && ToIsDir() &&
+//     FromIsEmpty() && ToIsNotEmpty()`
+//   - action: `DeleteFile(from); InsertDirRecursively(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### G'. non-empty dir to empty file of the same name: 51, 61
+//   - check: `SameName() && FromIsDir() && FromIsNotEmpty() &&
+//     ToIsFile() && ToIsEmpty()`
+//   - action: `DeleteDirRecursively(from); InsertFile(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### H. modify file contents: 23, 32
+//   - check: `SameName() && FromIsFile() && ToIsFile() &&
+//     FromIsNotEmpty() && ToIsNotEmpty()`
+//   - action: `ModifyFile(from, to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### I. file with contents to empty dir: 24, 34
+//   - check: `SameName() && DifferentHash() && FromIsFile() &&
+//     FromIsNotEmpty() && ToIsDir() && ToIsEmpty()`
+//   - action: `DeleteFile(from); InsertEmptyDir(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### I'. empty dir to file with contents: 42, 43
+//   - check: `SameName() && DifferentHash() && FromIsDir() &&
+//     FromIsEmpty() && ToIsFile() && ToIsNotEmpty()`
+//   - action: `DeleteDir(from); InsertFile(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### J. file with contents to dir with contents: 25, 26, 35, 36
+//   - check: `SameName() && DifferentHash() && FromIsFile() &&
+//     FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
+//   - action: `DeleteFile(from); InsertDirRecursively(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### J'. dir with contents to file with contents: 52, 62, 53, 63
+//   - check: `SameName() && DifferentHash() && FromIsDir() &&
+//     FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()`
+//   - action: `DeleteDirRecursively(from); InsertFile(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### K. empty dir to dir with contents: 45, 46
+//   - check: `SameName() && DifferentHash() && FromIsDir() &&
+//     FromIsEmpty() && ToIsDir() && ToIsNotEmpty()`
+//   - action: `InsertChildrenRecursively(to)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### K'. dir with contents to empty dir: 54, 64
+//   - check: `SameName() && DifferentHash() && FromIsDir() &&
+//     FromIsNotEmpty() && ToIsDir() && ToIsEmpty()`
+//   - action: `DeleteChildrenRecursively(from)`
+//   - advance: `FromNext(); ToNext()`
+//
+// ### L. dir with contents to dir with different contents: 56, 65
+//   - check: `SameName() && DifferentHash() && FromIsDir() &&
+//     FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()`
+//   - action: nothing
+//   - advance: `FromStep(); ToStep()`
+//
+//
+
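+// Illustrative aside (not upstream text): walking a concrete pair of
+// trees through the cases above, with from = (a<1>, b<>) and
+// to = (a<2>, c<>): "a" falls in case H (same name, both files with
+// contents, different hash, so modify); "b" only exists in from and sorts
+// before "c", so it falls in case D (delete); "c" is then the only noder
+// left in to, so it falls in case C (insert).
+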
+// All these cases can be further simplified by a truth table
+// reduction process, in which we gather similar checks together to
+// make the final code easier to read and understand.
+//
+// The first 6 columns are the outputs of the checks to perform on
+// both noders. I have labeled them 1 to 6; this is what they mean:
+//
+// 1: SameName()
+// 2: SameHash()
+// 3: FromIsDir()
+// 4: ToIsDir()
+// 5: FromIsEmpty()
+// 6: ToIsEmpty()
+//
+// The from and to columns are fsnoder examples of the elements
+// that you will find in each tree under the specified comparison
+// results (columns 1 to 6).
+//
+// The type column identifies the case we are in, from the list above.
+//
+// The type' column identifies the new set of reduced cases, using
+// lowercase letters, and they are explained after the table.
+//
+// The last column is the set of actions and advances for each case.
+//
+// "---" means impossible except in case of hash collision.
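+//
+// Illustrative aside (not upstream text): to read one row of the table
+// below, take "1 0 0 1 1 0 | a<> | a(...)": same name, different hash,
+// from is an empty file, to is a non-empty dir; that is case G above,
+// reduced to type f, so the code deletes the file, inserts the directory
+// recursively and advances both iterators (NN).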
+// +// advance meaning: +// - NN: from.Next(); to.Next() +// - SS: from.Step(); to.Step() +// +// 1 2 3 4 5 6 | from | to |type|type'|action ; advance +// ------------+--------+--------+----+------------------------------------ +// 0 0 0 0 0 0 | | | | | if !SameName() { +// . | | | | | if FromBeforeTo() { +// . | | | D | d | delete(from); from.Next() +// . | | | | | } else { +// . | | | C | c | insert(to); to.Next() +// . | | | | | } +// 0 1 1 1 1 1 | | | | | } +// 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN +// 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN +// 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN +// 1 0 0 0 1 1 | ---- | ---- | | e | +// 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN +// 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN +// 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN +// 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN +// 1 0 1 0 0 0 | a(...) | a<1> | J' | f | delete(from); insert(to); NN +// 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN +// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN +// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN +// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS +// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChildren(from); NN +// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN +// 1 0 1 1 1 1 | ---- | ---- | | | +// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN +// 1 1 0 0 0 1 | ---- | ---- | | b | +// 1 1 0 0 1 0 | ---- | ---- | | b | +// 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN +// 1 1 0 1 0 0 | ---- | ---- | | b | +// 1 1 0 1 0 1 | ---- | ---- | | b | +// 1 1 0 1 1 0 | ---- | ---- | | b | +// 1 1 0 1 1 1 | ---- | ---- | | b | +// 1 1 1 0 0 0 | ---- | ---- | | b | +// 1 1 1 0 0 1 | ---- | ---- | | b | +// 1 1 1 0 1 0 | ---- | ---- | | b | +// 1 1 1 0 1 1 | ---- | ---- | | b | +// 1 1 1 1 0 0 | a(...) | a(...) | B | b | nothing; NN +// 1 1 1 1 0 1 | ---- | ---- | | b | +// 1 1 1 1 1 0 | ---- | ---- | | b | +// 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN +// +// c and d: +// if !SameName() +// d if FromBeforeTo() +// c else +// b: SameName) && sameHash() +// e: SameName() && !sameHash() && BothAreFiles() +// f: SameName() && !sameHash() && FileAndDir() +// g: SameName() && !sameHash() && BothAreDirs() && NoneIsEmpty +// i: SameName() && !sameHash() && BothAreDirs() && FromIsEmpty +// h: else of i + +import ( + "context" + "errors" + "fmt" + + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +var ( + // ErrCanceled is returned whenever the operation is canceled. + ErrCanceled = errors.New("operation canceled") +) + +// DiffTree calculates the list of changes between two merkletries. It +// uses the provided hashEqual callback to compare noders. +func DiffTree( + fromTree, + toTree noder.Noder, + hashEqual noder.Equal, +) (Changes, error) { + return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual) +} + +// DiffTreeContext calculates the list of changes between two merkletries. It +// uses the provided hashEqual callback to compare noders. 
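+//
+// An illustrative sketch (not upstream text) of a typical call, comparing
+// two noder trees with a hash-based equality callback:
+//
+//	equal := func(a, b noder.Hasher) bool { return bytes.Equal(a.Hash(), b.Hash()) }
+//	changes, err := merkletrie.DiffTreeContext(ctx, fromTree, toTree, equal)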
+// Error will be returned if context expires +// Provided context must be non nil +func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder, + hashEqual noder.Equal) (Changes, error) { + ret := NewChanges() + + ii, err := newDoubleIter(fromTree, toTree, hashEqual) + if err != nil { + return nil, err + } + + for { + select { + case <-ctx.Done(): + return nil, ErrCanceled + default: + } + + from := ii.from.current + to := ii.to.current + + switch r := ii.remaining(); r { + case noMoreNoders: + return ret, nil + case onlyFromRemains: + if err = ret.AddRecursiveDelete(from); err != nil { + return nil, err + } + if err = ii.nextFrom(); err != nil { + return nil, err + } + case onlyToRemains: + if err = ret.AddRecursiveInsert(to); err != nil { + return nil, err + } + if err = ii.nextTo(); err != nil { + return nil, err + } + case bothHaveNodes: + if err = diffNodes(&ret, ii); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("unknown remaining value: %d", r)) + } + } +} + +func diffNodes(changes *Changes, ii *doubleIter) error { + from := ii.from.current + to := ii.to.current + var err error + + // compare their full paths as strings + switch from.Compare(to) { + case -1: + if err = changes.AddRecursiveDelete(from); err != nil { + return err + } + if err = ii.nextFrom(); err != nil { + return err + } + case 1: + if err = changes.AddRecursiveInsert(to); err != nil { + return err + } + if err = ii.nextTo(); err != nil { + return err + } + default: + if err := diffNodesSameName(changes, ii); err != nil { + return err + } + } + + return nil +} + +func diffNodesSameName(changes *Changes, ii *doubleIter) error { + from := ii.from.current + to := ii.to.current + + status, err := ii.compare() + if err != nil { + return err + } + + switch { + case status.sameHash: + // do nothing + if err = ii.nextBoth(); err != nil { + return err + } + case status.bothAreFiles: + changes.Add(NewModify(from, to)) + if err = ii.nextBoth(); err != nil { + return err + } + case status.fileAndDir: + if err = changes.AddRecursiveDelete(from); err != nil { + return err + } + if err = changes.AddRecursiveInsert(to); err != nil { + return err + } + if err = ii.nextBoth(); err != nil { + return err + } + case status.bothAreDirs: + if err = diffDirs(changes, ii); err != nil { + return err + } + default: + return fmt.Errorf("bad status from double iterator") + } + + return nil +} + +func diffDirs(changes *Changes, ii *doubleIter) error { + from := ii.from.current + to := ii.to.current + + status, err := ii.compare() + if err != nil { + return err + } + + switch { + case status.fromIsEmptyDir: + if err = changes.AddRecursiveInsert(to); err != nil { + return err + } + if err = ii.nextBoth(); err != nil { + return err + } + case status.toIsEmptyDir: + if err = changes.AddRecursiveDelete(from); err != nil { + return err + } + if err = ii.nextBoth(); err != nil { + return err + } + case !status.fromIsEmptyDir && !status.toIsEmptyDir: + // do nothing + if err = ii.stepBoth(); err != nil { + return err + } + default: + return fmt.Errorf("both dirs are empty but has different hash") + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go new file mode 100644 index 0000000000..5204024ad4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go @@ -0,0 +1,34 @@ +/* +Package merkletrie provides support for n-ary trees that are at the same +time Merkle trees and Radix trees (tries). 
+
+Git trees are Radix n-ary trees by virtue of the names of their
+tree entries. At the same time, git trees are Merkle trees thanks to
+their hashes.
+
+This package defines Merkle tries as nodes that should have:
+
+- a hash: the Merkle part of the Merkle trie
+
+- a key: the Radix part of the Merkle trie
+
+The Merkle hash condition is not enforced by this package though. This
+means that the hash of a node doesn't have to take into account the hashes of
+its children, which is good for testing purposes.
+
+Nodes in the Merkle trie are abstracted by the Noder interface. The
+intended use is that git trees implement this interface, either
+directly or using a simple wrapper.
+
+This package provides an iterator for merkletries that can skip whole
+directory-like noders and an efficient merkletrie comparison algorithm.
+
+When comparing git trees, the simple approach of alphabetically sorting
+their elements and comparing the resulting lists is too slow as it
+depends linearly on the number of files in the trees: when a directory
+has lots of files but none of them has been modified, this approach is
+very expensive. We can do better by pruning whole directories that
+have not changed, just by looking at their hashes. This package provides
+the tools to do exactly that.
+*/
+package merkletrie
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go
new file mode 100644
index 0000000000..4a4341b387
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go
@@ -0,0 +1,187 @@
+package merkletrie
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
+)
+
+// A doubleIter is a convenience type to keep track of the current
+// noders in two merkletries that are going to be iterated in parallel.
+// It has methods for:
+//
+// - iterating over the merkletries, both at the same time or
+// individually: nextFrom, nextTo, nextBoth, stepBoth
+//
+// - checking if there are noders left in one or both of them with the
+// remaining method and its associated returned type.
+//
+// - comparing the current noders of both merkletries in several ways,
+// with the compare method and its associated returned type.
+type doubleIter struct {
+	from struct {
+		iter    *Iter
+		current noder.Path // nil if no more nodes
+	}
+	to struct {
+		iter    *Iter
+		current noder.Path // nil if no more nodes
+	}
+	hashEqual noder.Equal
+}
+
+// newDoubleIter returns a new doubleIter for the merkletries "from" and
+// "to". The hashEqual callback function will be used by the doubleIter
+// to compare the hash of the noders in the merkletries. The doubleIter
+// will be initialized to the first elements in each merkletrie, if any.
+func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) ( + *doubleIter, error) { + var ii doubleIter + var err error + + if ii.from.iter, err = NewIter(from); err != nil { + return nil, fmt.Errorf("from: %s", err) + } + if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil { + return nil, fmt.Errorf("from: %s", err) + } + + if ii.to.iter, err = NewIter(to); err != nil { + return nil, fmt.Errorf("to: %s", err) + } + if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil { + return nil, fmt.Errorf("to: %s", err) + } + + ii.hashEqual = hashEqual + + return &ii, nil +} + +func turnEOFIntoNil(e error) error { + if e != nil && e != io.EOF { + return e + } + return nil +} + +// NextBoth makes d advance to the next noder in both merkletries. If +// any of them is a directory, it skips its contents. +func (d *doubleIter) nextBoth() error { + if err := d.nextFrom(); err != nil { + return err + } + if err := d.nextTo(); err != nil { + return err + } + + return nil +} + +// NextFrom makes d advance to the next noder in the "from" merkletrie, +// skipping its contents if it is a directory. +func (d *doubleIter) nextFrom() (err error) { + d.from.current, err = d.from.iter.Next() + return turnEOFIntoNil(err) +} + +// NextTo makes d advance to the next noder in the "to" merkletrie, +// skipping its contents if it is a directory. +func (d *doubleIter) nextTo() (err error) { + d.to.current, err = d.to.iter.Next() + return turnEOFIntoNil(err) +} + +// StepBoth makes d advance to the next noder in both merkletries, +// getting deeper into directories if that is the case. +func (d *doubleIter) stepBoth() (err error) { + if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil { + return err + } + if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil { + return err + } + return nil +} + +// Remaining returns if there are no more noders in the tree, if both +// have noders or if one of them doesn't. +func (d *doubleIter) remaining() remaining { + if d.from.current == nil && d.to.current == nil { + return noMoreNoders + } + + if d.from.current == nil && d.to.current != nil { + return onlyToRemains + } + + if d.from.current != nil && d.to.current == nil { + return onlyFromRemains + } + + return bothHaveNodes +} + +// Remaining values tells you whether both trees still have noders, or +// only one of them or none of them. +type remaining int + +const ( + noMoreNoders remaining = iota + onlyToRemains + onlyFromRemains + bothHaveNodes +) + +// Compare returns the comparison between the current elements in the +// merkletries. +func (d *doubleIter) compare() (s comparison, err error) { + s.sameHash = d.hashEqual(d.from.current, d.to.current) + + fromIsDir := d.from.current.IsDir() + toIsDir := d.to.current.IsDir() + + s.bothAreDirs = fromIsDir && toIsDir + s.bothAreFiles = !fromIsDir && !toIsDir + s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles + + fromNumChildren, err := d.from.current.NumChildren() + if err != nil { + return comparison{}, fmt.Errorf("from: %s", err) + } + + toNumChildren, err := d.to.current.NumChildren() + if err != nil { + return comparison{}, fmt.Errorf("to: %s", err) + } + + s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0 + s.toIsEmptyDir = toIsDir && toNumChildren == 0 + + return +} + +// Answers to a lot of questions you can ask about how to noders are +// equal or different. +type comparison struct { + // the following are only valid if both nodes have the same name + // (i.e. 
nameComparison == 0) + + // Do both nodes have the same hash? + sameHash bool + // Are both nodes files? + bothAreFiles bool + + // the following are only valid if any of the noders are dirs, + // this is, if !bothAreFiles + + // Is one a file and the other a dir? + fileAndDir bool + // Are both nodes dirs? + bothAreDirs bool + // Is the from node an empty dir? + fromIsEmptyDir bool + // Is the to Node an empty dir? + toIsEmptyDir bool +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go new file mode 100644 index 0000000000..2fc3d7a638 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go @@ -0,0 +1,195 @@ +package filesystem + +import ( + "io" + "os" + "path" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" + + "github.com/go-git/go-billy/v5" +) + +var ignore = map[string]bool{ + ".git": true, +} + +// The node represents a file or a directory in a billy.Filesystem. It +// implements the interface noder.Noder of merkletrie package. +// +// This implementation implements a "standard" hash method being able to be +// compared with any other noder.Noder implementation inside of go-git. +type node struct { + fs billy.Filesystem + submodules map[string]plumbing.Hash + + path string + hash []byte + children []noder.Noder + isDir bool +} + +// NewRootNode returns the root node based on a given billy.Filesystem. +// +// In order to provide the submodule hash status, a map[string]plumbing.Hash +// should be provided where the key is the path of the submodule and the commit +// of the submodule HEAD +func NewRootNode( + fs billy.Filesystem, + submodules map[string]plumbing.Hash, +) noder.Noder { + return &node{fs: fs, submodules: submodules, isDir: true} +} + +// Hash the hash of a filesystem is the result of concatenating the computed +// plumbing.Hash of the file as a Blob and its plumbing.FileMode; that way the +// difftree algorithm will detect changes in the contents of files and also in +// their mode. 
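+//
+// (Illustrative aside, not upstream text, inferred from calculateHash
+// below: the value is the 20-byte blob hash followed by the 4-byte file
+// mode, i.e. append(blobHash[:], mode.Bytes()...) gives 20 + 4 = 24 bytes.)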
+// +// The hash of a directory is always a 24-bytes slice of zero values +func (n *node) Hash() []byte { + return n.hash +} + +func (n *node) Name() string { + return path.Base(n.path) +} + +func (n *node) IsDir() bool { + return n.isDir +} + +func (n *node) Children() ([]noder.Noder, error) { + if err := n.calculateChildren(); err != nil { + return nil, err + } + + return n.children, nil +} + +func (n *node) NumChildren() (int, error) { + if err := n.calculateChildren(); err != nil { + return -1, err + } + + return len(n.children), nil +} + +func (n *node) calculateChildren() error { + if !n.IsDir() { + return nil + } + + if len(n.children) != 0 { + return nil + } + + files, err := n.fs.ReadDir(n.path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + for _, file := range files { + if _, ok := ignore[file.Name()]; ok { + continue + } + + c, err := n.newChildNode(file) + if err != nil { + return err + } + + n.children = append(n.children, c) + } + + return nil +} + +func (n *node) newChildNode(file os.FileInfo) (*node, error) { + path := path.Join(n.path, file.Name()) + + hash, err := n.calculateHash(path, file) + if err != nil { + return nil, err + } + + node := &node{ + fs: n.fs, + submodules: n.submodules, + + path: path, + hash: hash, + isDir: file.IsDir(), + } + + if hash, isSubmodule := n.submodules[path]; isSubmodule { + node.hash = append(hash[:], filemode.Submodule.Bytes()...) + node.isDir = false + } + + return node, nil +} + +func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { + if file.IsDir() { + return make([]byte, 24), nil + } + + var hash plumbing.Hash + var err error + if file.Mode()&os.ModeSymlink != 0 { + hash, err = n.doCalculateHashForSymlink(path, file) + } else { + hash, err = n.doCalculateHashForRegular(path, file) + } + + if err != nil { + return nil, err + } + + mode, err := filemode.NewFromOSFileMode(file.Mode()) + if err != nil { + return nil, err + } + + return append(hash[:], mode.Bytes()...), nil +} + +func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { + f, err := n.fs.Open(path) + if err != nil { + return plumbing.ZeroHash, err + } + + defer f.Close() + + h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + if _, err := io.Copy(h, f); err != nil { + return plumbing.ZeroHash, err + } + + return h.Sum(), nil +} + +func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { + target, err := n.fs.Readlink(path) + if err != nil { + return plumbing.ZeroHash, err + } + + h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + if _, err := h.Write([]byte(target)); err != nil { + return plumbing.ZeroHash, err + } + + return h.Sum(), nil +} + +func (n *node) String() string { + return n.path +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go new file mode 100644 index 0000000000..d05b0c694d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go @@ -0,0 +1,90 @@ +package index + +import ( + "path" + "strings" + + "github.com/go-git/go-git/v5/plumbing/format/index" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// The node represents a index.Entry or a directory inferred from the path +// of all entries. It implements the interface noder.Noder of merkletrie +// package. 
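+//
+// Illustrative aside (not upstream text): an index entry named "a/b/c.go"
+// makes NewRootNode below create directory nodes "a" and "a/b" on the fly
+// and attach a file node that carries the entry, turning the flat index
+// into a tree.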
+//
+// This implementation implements a "standard" hash method being able to be
+// compared with any other noder.Noder implementation inside of go-git.
+type node struct {
+	path     string
+	entry    *index.Entry
+	children []noder.Noder
+	isDir    bool
+}
+
+// NewRootNode returns the root node of a computed tree from an index.Index.
+func NewRootNode(idx *index.Index) noder.Noder {
+	const rootNode = ""
+
+	m := map[string]*node{rootNode: {isDir: true}}
+
+	for _, e := range idx.Entries {
+		parts := strings.Split(e.Name, string("/"))
+
+		var fullpath string
+		for _, part := range parts {
+			parent := fullpath
+			fullpath = path.Join(fullpath, part)
+
+			if _, ok := m[fullpath]; ok {
+				continue
+			}
+
+			n := &node{path: fullpath}
+			if fullpath == e.Name {
+				n.entry = e
+			} else {
+				n.isDir = true
+			}
+
+			m[n.path] = n
+			m[parent].children = append(m[parent].children, n)
+		}
+	}
+
+	return m[rootNode]
+}
+
+func (n *node) String() string {
+	return n.path
+}
+
+// Hash the hash of a node is a 24-byte slice: the result of concatenating
+// the computed plumbing.Hash of the file as a Blob and its
+// plumbing.FileMode; that way the difftree algorithm will detect changes
+// in the contents of files and also in their mode.
+//
+// If the node is computed and not based on an index.Entry the hash is
+// equal to a 24-byte slice of zero values.
+func (n *node) Hash() []byte {
+	if n.entry == nil {
+		return make([]byte, 24)
+	}
+
+	return append(n.entry.Hash[:], n.entry.Mode.Bytes()...)
+}
+
+func (n *node) Name() string {
+	return path.Base(n.path)
+}
+
+func (n *node) IsDir() bool {
+	return n.isDir
+}
+
+func (n *node) Children() ([]noder.Noder, error) {
+	return n.children, nil
+}
+
+func (n *node) NumChildren() (int, error) {
+	return len(n.children), nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go
new file mode 100644
index 0000000000..131878a1c7
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go
@@ -0,0 +1,91 @@
+package frame
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
+)
+
+// A Frame is a collection of siblings in a trie, sorted alphabetically
+// by name.
+type Frame struct {
+	// siblings, sorted in reverse alphabetical order by name
+	stack []noder.Noder
+}
+
+type byName []noder.Noder
+
+func (a byName) Len() int      { return len(a) }
+func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byName) Less(i, j int) bool {
+	return strings.Compare(a[i].Name(), a[j].Name()) < 0
+}
+
+// New returns a frame with the children of the provided node.
+func New(n noder.Noder) (*Frame, error) {
+	children, err := n.Children()
+	if err != nil {
+		return nil, err
+	}
+
+	sort.Sort(sort.Reverse(byName(children)))
+	return &Frame{
+		stack: children,
+	}, nil
+}
+
+// String returns the quoted names of the noders in the frame sorted in
+// alphabetical order by name, surrounded by square brackets and
+// separated by commas.
+//
+// Examples:
+//
+//	[]
+//	["a", "b"]
+func (f *Frame) String() string {
+	var buf bytes.Buffer
+	_ = buf.WriteByte('[')
+
+	sep := ""
+	for i := f.Len() - 1; i >= 0; i-- {
+		_, _ = buf.WriteString(sep)
+		sep = ", "
+		_, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name()))
+	}
+
+	_ = buf.WriteByte(']')
+
+	return buf.String()
+}
+
+// First returns, but doesn't extract, the noder with the alphabetically
+// smallest name in the frame and true if the frame was not empty.
+// Otherwise it returns nil and false.
+func (f *Frame) First() (noder.Noder, bool) {
+	if f.Len() == 0 {
+		return nil, false
+	}
+
+	top := f.Len() - 1
+
+	return f.stack[top], true
+}
+
+// Drop extracts the noder with the alphabetically smallest name in the
+// frame or does nothing if the frame was empty.
+func (f *Frame) Drop() {
+	if f.Len() == 0 {
+		return
+	}
+
+	top := f.Len() - 1
+	f.stack[top] = nil
+	f.stack = f.stack[:top]
+}
+
+// Len returns the number of noders in the frame.
+func (f *Frame) Len() int {
+	return len(f.stack)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go
new file mode 100644
index 0000000000..d75afec464
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go
@@ -0,0 +1,216 @@
+package merkletrie
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/utils/merkletrie/internal/frame"
+	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
+)
+
+// Iter is an iterator for merkletries (only the trie part of the
+// merkletrie is relevant here, it does not use the Hasher interface).
+//
+// The iteration is performed in depth-first pre-order. Entries at each
+// depth are traversed in (case-sensitive) alphabetical order.
+//
+// This is the kind of traversal you will expect when listing ordinary
+// files and directories recursively, for example:
+//
+//	    Trie           Traversal order
+//	    ----           ---------------
+//	     .
+//	   / | \           c
+//	  /  |  \          d/
+//	 d   c   z   ===>  d/a
+//	/ \                d/b
+//	b  a               z
+//
+// This iterator is somewhat special as you can choose to skip whole
+// "directories" when iterating:
+//
+// - The Step method will iterate normally.
+//
+// - The Next method will not descend deeper into the tree.
+//
+// For example, if the iterator is at `d/`, the Step method will return
+// `d/a` while Next would have returned `z` instead (skipping `d/`
+// and its descendants). The names of these two methods are based on
+// the well-known "next" and "step" operations, quite common in
+// debuggers, like gdb.
+//
+// The paths returned by the iterator will be relative, if the iterator
+// was created from a single node, or absolute, if the iterator was
+// created from the path to the node (the path will be prefixed to all
+// returned paths).
+type Iter struct {
+	// Tells if the iteration has started.
+	hasStarted bool
+	// The top of this stack has the current node and its siblings. The
+	// rest of the stack keeps the ancestors of the current node and
+	// their corresponding siblings. The current element is always the
+	// top element of the top frame.
+	//
+	// When "step"ping into a node, its children are pushed as a new
+	// frame.
+	//
+	// When "next"ing past a node, the current element is dropped by
+	// popping the top frame.
+	frameStack []*frame.Frame
+	// The base path used to turn the relative paths used internally by
+	// the iterator into absolute paths used by external applications.
+	// For a relative iterator this will be nil.
+ base noder.Path +} + +// NewIter returns a new relative iterator using the provider noder as +// its unnamed root. When iterating, all returned paths will be +// relative to node. +func NewIter(n noder.Noder) (*Iter, error) { + return newIter(n, nil) +} + +// NewIterFromPath returns a new absolute iterator from the noder at the +// end of the path p. When iterating, all returned paths will be +// absolute, using the root of the path p as their root. +func NewIterFromPath(p noder.Path) (*Iter, error) { + return newIter(p, p) // Path implements Noder +} + +func newIter(root noder.Noder, base noder.Path) (*Iter, error) { + ret := &Iter{ + base: base, + } + + if root == nil { + return ret, nil + } + + frame, err := frame.New(root) + if err != nil { + return nil, err + } + ret.push(frame) + + return ret, nil +} + +func (iter *Iter) top() (*frame.Frame, bool) { + if len(iter.frameStack) == 0 { + return nil, false + } + top := len(iter.frameStack) - 1 + + return iter.frameStack[top], true +} + +func (iter *Iter) push(f *frame.Frame) { + iter.frameStack = append(iter.frameStack, f) +} + +const ( + doDescend = true + dontDescend = false +) + +// Next returns the path of the next node without descending deeper into +// the trie and nil. If there are no more entries in the trie it +// returns nil and io.EOF. In case of error, it will return nil and the +// error. +func (iter *Iter) Next() (noder.Path, error) { + return iter.advance(dontDescend) +} + +// Step returns the path to the next node in the trie, descending deeper +// into it if needed, and nil. If there are no more nodes in the trie, +// it returns nil and io.EOF. In case of error, it will return nil and +// the error. +func (iter *Iter) Step() (noder.Path, error) { + return iter.advance(doDescend) +} + +// Advances the iterator in the desired direction: descend or +// dontDescend. +// +// Returns the new current element and a nil error on success. If there +// are no more elements in the trie below the base, it returns nil, and +// io.EOF. Returns nil and an error in case of errors. +func (iter *Iter) advance(wantDescend bool) (noder.Path, error) { + current, err := iter.current() + if err != nil { + return nil, err + } + + // The first time we just return the current node. + if !iter.hasStarted { + iter.hasStarted = true + return current, nil + } + + // Advances means getting a next current node, either its first child or + // its next sibling, depending if we must descend or not. + numChildren, err := current.NumChildren() + if err != nil { + return nil, err + } + + mustDescend := numChildren != 0 && wantDescend + if mustDescend { + // descend: add a new frame with the current's children. + frame, err := frame.New(current) + if err != nil { + return nil, err + } + iter.push(frame) + } else { + // don't descend: just drop the current node + iter.drop() + } + + return iter.current() +} + +// Returns the path to the current node, adding the base if there was +// one, and a nil error. If there were no noders left, it returns nil +// and io.EOF. If an error occurred, it returns nil and the error. +func (iter *Iter) current() (noder.Path, error) { + if topFrame, ok := iter.top(); !ok { + return nil, io.EOF + } else if _, ok := topFrame.First(); !ok { + return nil, io.EOF + } + + ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack)) + + // concat the base... + ret = append(ret, iter.base...) + // ... 
and the current node and all its ancestors + for i, f := range iter.frameStack { + t, ok := f.First() + if !ok { + panic(fmt.Sprintf("frame %d is empty", i)) + } + ret = append(ret, t) + } + + return ret, nil +} + +// removes the current node if any, and all the frames that become empty as a +// consequence of this action. +func (iter *Iter) drop() { + frame, ok := iter.top() + if !ok { + return + } + + frame.Drop() + // if the frame is empty, remove it and its parent, recursively + if frame.Len() == 0 { + top := len(iter.frameStack) - 1 + iter.frameStack[top] = nil + iter.frameStack = iter.frameStack[:top] + iter.drop() + } +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go new file mode 100644 index 0000000000..d6b3de4ada --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go @@ -0,0 +1,59 @@ +// Package noder provide an interface for defining nodes in a +// merkletrie, their hashes and their paths (a noders and its +// ancestors). +// +// The hasher interface is easy to implement naively by elements that +// already have a hash, like git blobs and trees. More sophisticated +// implementations can implement the Equal function in exotic ways +// though: for instance, comparing the modification time of directories +// in a filesystem. +package noder + +import "fmt" + +// Hasher interface is implemented by types that can tell you +// their hash. +type Hasher interface { + Hash() []byte +} + +// Equal functions take two hashers and return if they are equal. +// +// These functions are expected to be faster than reflect.Equal or +// reflect.DeepEqual because they can compare just the hash of the +// objects, instead of their contents, so they are expected to be O(1). +type Equal func(a, b Hasher) bool + +// The Noder interface is implemented by the elements of a Merkle Trie. +// +// There are two types of elements in a Merkle Trie: +// +// - file-like nodes: they cannot have children. +// +// - directory-like nodes: they can have 0 or more children and their +// hash is calculated by combining their children hashes. +type Noder interface { + Hasher + fmt.Stringer // for testing purposes + // Name returns the name of an element (relative, not its full + // path). + Name() string + // IsDir returns true if the element is a directory-like node or + // false if it is a file-like node. + IsDir() bool + // Children returns the children of the element. Note that empty + // directory-like noders and file-like noders will both return + // NoChildren. + Children() ([]Noder, error) + // NumChildren returns the number of children this element has. + // + // This method is an optimization: the number of children is easily + // calculated as the length of the value returned by the Children + // method (above); yet, some implementations will be able to + // implement NumChildren in O(1) while Children is usually more + // complex. + NumChildren() (int, error) +} + +// NoChildren represents the children of a noder without children. +var NoChildren = []Noder{} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go new file mode 100644 index 0000000000..1c7ef54eeb --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go @@ -0,0 +1,90 @@ +package noder + +import ( + "bytes" + "strings" +) + +// Path values represent a noder and its ancestors. 
The root goes first +// and the actual final noder the path is referring to will be the last. +// +// A path implements the Noder interface, redirecting all the interface +// calls to its final noder. +// +// Paths build from an empty Noder slice are not valid paths and should +// not be used. +type Path []Noder + +// String returns the full path of the final noder as a string, using +// "/" as the separator. +func (p Path) String() string { + var buf bytes.Buffer + sep := "" + for _, e := range p { + _, _ = buf.WriteString(sep) + sep = "/" + _, _ = buf.WriteString(e.Name()) + } + + return buf.String() +} + +// Last returns the final noder in the path. +func (p Path) Last() Noder { + return p[len(p)-1] +} + +// Hash returns the hash of the final noder of the path. +func (p Path) Hash() []byte { + return p.Last().Hash() +} + +// Name returns the name of the final noder of the path. +func (p Path) Name() string { + return p.Last().Name() +} + +// IsDir returns if the final noder of the path is a directory-like +// noder. +func (p Path) IsDir() bool { + return p.Last().IsDir() +} + +// Children returns the children of the final noder in the path. +func (p Path) Children() ([]Noder, error) { + return p.Last().Children() +} + +// NumChildren returns the number of children the final noder of the +// path has. +func (p Path) NumChildren() (int, error) { + return p.Last().NumChildren() +} + +// Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger +// than other, in "directory order"; for example: +// +// "a" < "b" +// "a/b/c/d/z" < "b" +// "a/b/a" > "a/b" +func (p Path) Compare(other Path) int { + i := 0 + for { + switch { + case len(other) == len(p) && i == len(p): + return 0 + case i == len(other): + return 1 + case i == len(p): + return -1 + default: + // We do *not* normalize Unicode here. CGit doesn't. + // https://github.com/src-d/go-git/issues/1057 + cmp := strings.Compare(p[i].Name(), other[i].Name()) + if cmp != 0 { + return cmp + } + } + i++ + } +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go new file mode 100644 index 0000000000..62ad03b95c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -0,0 +1,959 @@ +package git + +import ( + "context" + "errors" + "fmt" + "io" + stdioutil "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/format/gitignore" + "github.com/go-git/go-git/v5/plumbing/format/index" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" + "github.com/go-git/go-git/v5/utils/merkletrie" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/util" +) + +var ( + ErrWorktreeNotClean = errors.New("worktree is not clean") + ErrSubmoduleNotFound = errors.New("submodule not found") + ErrUnstagedChanges = errors.New("worktree contains unstaged changes") + ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") + ErrNonFastForwardUpdate = errors.New("non-fast-forward update") +) + +// Worktree represents a git worktree. +type Worktree struct { + // Filesystem underlying filesystem. 
+ Filesystem billy.Filesystem + // External excludes not found in the repository .gitignore + Excludes []gitignore.Pattern + + r *Repository +} + +// Pull incorporates changes from a remote repository into the current branch. +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. +// +// Pull only supports merges where the can be resolved as a fast-forward. +func (w *Worktree) Pull(o *PullOptions) error { + return w.PullContext(context.Background(), o) +} + +// PullContext incorporates changes from a remote repository into the current +// branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if +// there are no changes to be fetched, or an error. +// +// Pull only supports merges where the can be resolved as a fast-forward. +// +// The provided Context must be non-nil. If the context expires before the +// operation is complete, an error is returned. The context only affects to the +// transport operations. +func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { + if err := o.Validate(); err != nil { + return err + } + + remote, err := w.r.Remote(o.RemoteName) + if err != nil { + return err + } + + fetchHead, err := remote.fetch(ctx, &FetchOptions{ + RemoteName: o.RemoteName, + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + Force: o.Force, + }) + + updated := true + if err == NoErrAlreadyUpToDate { + updated = false + } else if err != nil { + return err + } + + ref, err := storer.ResolveReference(fetchHead, o.ReferenceName) + if err != nil { + return err + } + + head, err := w.r.Head() + if err == nil { + headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash()) + if err != nil { + return err + } + + if !updated && headAheadOfRef { + return NoErrAlreadyUpToDate + } + + ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash()) + if err != nil { + return err + } + + if !ff { + return ErrNonFastForwardUpdate + } + } + + if err != nil && err != plumbing.ErrReferenceNotFound { + return err + } + + if err := w.updateHEAD(ref.Hash()); err != nil { + return err + } + + if err := w.Reset(&ResetOptions{ + Mode: MergeReset, + Commit: ref.Hash(), + }); err != nil { + return err + } + + if o.RecurseSubmodules != NoRecurseSubmodules { + return w.updateSubmodules(&SubmoduleUpdateOptions{ + RecurseSubmodules: o.RecurseSubmodules, + Auth: o.Auth, + }) + } + + return nil +} + +func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error { + s, err := w.Submodules() + if err != nil { + return err + } + o.Init = true + return s.Update(o) +} + +// Checkout switch branches or restore working tree files. 
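+// Illustrative sketch (not upstream text; assumes repo is an opened
+// *git.Repository):
+//
+//	w, _ := repo.Worktree()
+//	err := w.Checkout(&git.CheckoutOptions{
+//		Branch: plumbing.NewBranchReferenceName("feature-x"),
+//		Create: true,
+//	})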
+func (w *Worktree) Checkout(opts *CheckoutOptions) error { + if err := opts.Validate(); err != nil { + return err + } + + if opts.Create { + if err := w.createBranch(opts); err != nil { + return err + } + } + + c, err := w.getCommitFromCheckoutOptions(opts) + if err != nil { + return err + } + + ro := &ResetOptions{Commit: c, Mode: MergeReset} + if opts.Force { + ro.Mode = HardReset + } else if opts.Keep { + ro.Mode = SoftReset + } + + if !opts.Hash.IsZero() && !opts.Create { + err = w.setHEADToCommit(opts.Hash) + } else { + err = w.setHEADToBranch(opts.Branch, c) + } + + if err != nil { + return err + } + + return w.Reset(ro) +} +func (w *Worktree) createBranch(opts *CheckoutOptions) error { + _, err := w.r.Storer.Reference(opts.Branch) + if err == nil { + return fmt.Errorf("a branch named %q already exists", opts.Branch) + } + + if err != plumbing.ErrReferenceNotFound { + return err + } + + if opts.Hash.IsZero() { + ref, err := w.r.Head() + if err != nil { + return err + } + + opts.Hash = ref.Hash() + } + + return w.r.Storer.SetReference( + plumbing.NewHashReference(opts.Branch, opts.Hash), + ) +} + +func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { + if !opts.Hash.IsZero() { + return opts.Hash, nil + } + + b, err := w.r.Reference(opts.Branch, true) + if err != nil { + return plumbing.ZeroHash, err + } + + if !b.Name().IsTag() { + return b.Hash(), nil + } + + o, err := w.r.Object(plumbing.AnyObject, b.Hash()) + if err != nil { + return plumbing.ZeroHash, err + } + + switch o := o.(type) { + case *object.Tag: + if o.TargetType != plumbing.CommitObject { + return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) + } + + return o.Target, nil + case *object.Commit: + return o.Hash, nil + } + + return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type()) +} + +func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { + head := plumbing.NewHashReference(plumbing.HEAD, commit) + return w.r.Storer.SetReference(head) +} + +func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error { + target, err := w.r.Storer.Reference(branch) + if err != nil { + return err + } + + var head *plumbing.Reference + if target.Name().IsBranch() { + head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name()) + } else { + head = plumbing.NewHashReference(plumbing.HEAD, commit) + } + + return w.r.Storer.SetReference(head) +} + +// Reset the worktree to a specified state. 
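+// Illustrative sketch (not upstream text): a hard reset to a known commit
+// discards both staged and worktree changes:
+//
+//	err := w.Reset(&git.ResetOptions{Mode: git.HardReset, Commit: hash})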
+func (w *Worktree) Reset(opts *ResetOptions) error { + if err := opts.Validate(w.r); err != nil { + return err + } + + if opts.Mode == MergeReset { + unstaged, err := w.containsUnstagedChanges() + if err != nil { + return err + } + + if unstaged { + return ErrUnstagedChanges + } + } + + if err := w.setHEADCommit(opts.Commit); err != nil { + return err + } + + if opts.Mode == SoftReset { + return nil + } + + t, err := w.getTreeFromCommitHash(opts.Commit) + if err != nil { + return err + } + + if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { + if err := w.resetIndex(t); err != nil { + return err + } + } + + if opts.Mode == MergeReset || opts.Mode == HardReset { + if err := w.resetWorktree(t); err != nil { + return err + } + } + + return nil +} + +func (w *Worktree) resetIndex(t *object.Tree) error { + idx, err := w.r.Storer.Index() + if err != nil { + return err + } + b := newIndexBuilder(idx) + + changes, err := w.diffTreeWithStaging(t, true) + if err != nil { + return err + } + + for _, ch := range changes { + a, err := ch.Action() + if err != nil { + return err + } + + var name string + var e *object.TreeEntry + + switch a { + case merkletrie.Modify, merkletrie.Insert: + name = ch.To.String() + e, err = t.FindEntry(name) + if err != nil { + return err + } + case merkletrie.Delete: + name = ch.From.String() + } + + b.Remove(name) + if e == nil { + continue + } + + b.Add(&index.Entry{ + Name: name, + Hash: e.Hash, + Mode: e.Mode, + }) + + } + + b.Write(idx) + return w.r.Storer.SetIndex(idx) +} + +func (w *Worktree) resetWorktree(t *object.Tree) error { + changes, err := w.diffStagingWithWorktree(true) + if err != nil { + return err + } + + idx, err := w.r.Storer.Index() + if err != nil { + return err + } + b := newIndexBuilder(idx) + + for _, ch := range changes { + if err := w.checkoutChange(ch, t, b); err != nil { + return err + } + } + + b.Write(idx) + return w.r.Storer.SetIndex(idx) +} + +func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error { + a, err := ch.Action() + if err != nil { + return err + } + + var e *object.TreeEntry + var name string + var isSubmodule bool + + switch a { + case merkletrie.Modify, merkletrie.Insert: + name = ch.To.String() + e, err = t.FindEntry(name) + if err != nil { + return err + } + + isSubmodule = e.Mode == filemode.Submodule + case merkletrie.Delete: + return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String()) + } + + if isSubmodule { + return w.checkoutChangeSubmodule(name, a, e, idx) + } + + return w.checkoutChangeRegularFile(name, a, t, e, idx) +} + +func (w *Worktree) containsUnstagedChanges() (bool, error) { + ch, err := w.diffStagingWithWorktree(false) + if err != nil { + return false, err + } + + for _, c := range ch { + a, err := c.Action() + if err != nil { + return false, err + } + + if a == merkletrie.Insert { + continue + } + + return true, nil + } + + return false, nil +} + +func (w *Worktree) setHEADCommit(commit plumbing.Hash) error { + head, err := w.r.Reference(plumbing.HEAD, false) + if err != nil { + return err + } + + if head.Type() == plumbing.HashReference { + head = plumbing.NewHashReference(plumbing.HEAD, commit) + return w.r.Storer.SetReference(head) + } + + branch, err := w.r.Reference(head.Target(), false) + if err != nil { + return err + } + + if !branch.Name().IsBranch() { + return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type()) + } + + branch = plumbing.NewHashReference(branch.Name(), commit) + return 
w.r.Storer.SetReference(branch) +} + +func (w *Worktree) checkoutChangeSubmodule(name string, + a merkletrie.Action, + e *object.TreeEntry, + idx *indexBuilder, +) error { + switch a { + case merkletrie.Modify: + sub, err := w.Submodule(name) + if err != nil { + return err + } + + if !sub.initialized { + return nil + } + + return w.addIndexFromTreeEntry(name, e, idx) + case merkletrie.Insert: + mode, err := e.Mode.ToOSFileMode() + if err != nil { + return err + } + + if err := w.Filesystem.MkdirAll(name, mode); err != nil { + return err + } + + return w.addIndexFromTreeEntry(name, e, idx) + } + + return nil +} + +func (w *Worktree) checkoutChangeRegularFile(name string, + a merkletrie.Action, + t *object.Tree, + e *object.TreeEntry, + idx *indexBuilder, +) error { + switch a { + case merkletrie.Modify: + idx.Remove(name) + + // to apply perm changes the file is deleted, billy doesn't implement + // chmod + if err := w.Filesystem.Remove(name); err != nil { + return err + } + + fallthrough + case merkletrie.Insert: + f, err := t.File(name) + if err != nil { + return err + } + + if err := w.checkoutFile(f); err != nil { + return err + } + + return w.addIndexFromFile(name, e.Hash, idx) + } + + return nil +} + +var copyBufferPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 32*1024) + }, +} + +func (w *Worktree) checkoutFile(f *object.File) (err error) { + mode, err := f.Mode.ToOSFileMode() + if err != nil { + return + } + + if mode&os.ModeSymlink != 0 { + return w.checkoutFileSymlink(f) + } + + from, err := f.Reader() + if err != nil { + return + } + + defer ioutil.CheckClose(from, &err) + + to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) + if err != nil { + return + } + + defer ioutil.CheckClose(to, &err) + buf := copyBufferPool.Get().([]byte) + _, err = io.CopyBuffer(to, from, buf) + copyBufferPool.Put(buf) + return +} + +func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { + from, err := f.Reader() + if err != nil { + return + } + + defer ioutil.CheckClose(from, &err) + + bytes, err := stdioutil.ReadAll(from) + if err != nil { + return + } + + err = w.Filesystem.Symlink(string(bytes), f.Name) + + // On windows, this might fail. + // Follow Git on Windows behavior by writing the link as it is. 
+ if err != nil && isSymlinkWindowsNonAdmin(err) { + mode, _ := f.Mode.ToOSFileMode() + + to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) + if err != nil { + return err + } + + defer ioutil.CheckClose(to, &err) + + _, err = to.Write(bytes) + return err + } + return +} + +func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error { + idx.Remove(name) + idx.Add(&index.Entry{ + Hash: f.Hash, + Name: name, + Mode: filemode.Submodule, + }) + return nil +} + +func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error { + idx.Remove(name) + fi, err := w.Filesystem.Lstat(name) + if err != nil { + return err + } + + mode, err := filemode.NewFromOSFileMode(fi.Mode()) + if err != nil { + return err + } + + e := &index.Entry{ + Hash: h, + Name: name, + Mode: mode, + ModifiedAt: fi.ModTime(), + Size: uint32(fi.Size()), + } + + // if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid + // can be retrieved, otherwise this doesn't apply + if fillSystemInfo != nil { + fillSystemInfo(e, fi.Sys()) + } + idx.Add(e) + return nil +} + +func (w *Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { + c, err := w.r.CommitObject(commit) + if err != nil { + return nil, err + } + + return c.Tree() +} + +var fillSystemInfo func(e *index.Entry, sys interface{}) + +const gitmodulesFile = ".gitmodules" + +// Submodule returns the submodule with the given name +func (w *Worktree) Submodule(name string) (*Submodule, error) { + l, err := w.Submodules() + if err != nil { + return nil, err + } + + for _, m := range l { + if m.Config().Name == name { + return m, nil + } + } + + return nil, ErrSubmoduleNotFound +} + +// Submodules returns all the available submodules +func (w *Worktree) Submodules() (Submodules, error) { + l := make(Submodules, 0) + m, err := w.readGitmodulesFile() + if err != nil || m == nil { + return l, err + } + + c, err := w.r.Config() + if err != nil { + return nil, err + } + + for _, s := range m.Submodules { + l = append(l, w.newSubmodule(s, c.Submodules[s.Name])) + } + + return l, nil +} + +func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule { + m := &Submodule{w: w} + m.initialized = fromConfig != nil + + if !m.initialized { + m.c = fromModules + return m + } + + m.c = fromConfig + m.c.Path = fromModules.Path + return m +} + +func (w *Worktree) isSymlink(path string) bool { + if s, err := w.Filesystem.Lstat(path); err == nil { + return s.Mode()&os.ModeSymlink != 0 + } + return false +} + +func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { + if w.isSymlink(gitmodulesFile) { + return nil, ErrGitModulesSymlink + } + + f, err := w.Filesystem.Open(gitmodulesFile) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + defer f.Close() + input, err := stdioutil.ReadAll(f) + if err != nil { + return nil, err + } + + m := config.NewModules() + return m, m.Unmarshal(input) +} + +// Clean the worktree by removing untracked files. +// An empty dir could be removed - this is what `git clean -f -d .` does. 
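+// Illustrative sketch (not upstream text): removing untracked files and,
+// with Dir set, empty directories as well:
+//
+//	err := w.Clean(&git.CleanOptions{Dir: true})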
+func (w *Worktree) Clean(opts *CleanOptions) error { + s, err := w.Status() + if err != nil { + return err + } + + root := "" + files, err := w.Filesystem.ReadDir(root) + if err != nil { + return err + } + return w.doClean(s, opts, root, files) +} + +func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error { + for _, fi := range files { + if fi.Name() == GitDirName { + continue + } + + // relative path under the root + path := filepath.Join(dir, fi.Name()) + if fi.IsDir() { + if !opts.Dir { + continue + } + + subfiles, err := w.Filesystem.ReadDir(path) + if err != nil { + return err + } + err = w.doClean(status, opts, path, subfiles) + if err != nil { + return err + } + } else { + if status.IsUntracked(path) { + if err := w.Filesystem.Remove(path); err != nil { + return err + } + } + } + } + + if opts.Dir { + return doCleanDirectories(w.Filesystem, dir) + } + return nil +} + +// GrepResult is structure of a grep result. +type GrepResult struct { + // FileName is the name of file which contains match. + FileName string + // LineNumber is the line number of a file at which a match was found. + LineNumber int + // Content is the content of the file at the matching line. + Content string + // TreeName is the name of the tree (reference name/commit hash) at + // which the match was performed. + TreeName string +} + +func (gr GrepResult) String() string { + return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content) +} + +// Grep performs grep on a worktree. +func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { + if err := opts.Validate(w); err != nil { + return nil, err + } + + // Obtain commit hash from options (CommitHash or ReferenceName). + var commitHash plumbing.Hash + // treeName contains the value of TreeName in GrepResult. + var treeName string + + if opts.ReferenceName != "" { + ref, err := w.r.Reference(opts.ReferenceName, true) + if err != nil { + return nil, err + } + commitHash = ref.Hash() + treeName = opts.ReferenceName.String() + } else if !opts.CommitHash.IsZero() { + commitHash = opts.CommitHash + treeName = opts.CommitHash.String() + } + + // Obtain a tree from the commit hash and get a tracked files iterator from + // the tree. + tree, err := w.getTreeFromCommitHash(commitHash) + if err != nil { + return nil, err + } + fileiter := tree.Files() + + return findMatchInFiles(fileiter, treeName, opts) +} + +// findMatchInFiles takes a FileIter, worktree name and GrepOptions, and +// returns a slice of GrepResult containing the result of regex pattern matching +// in content of all the files. +func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) { + var results []GrepResult + + err := fileiter.ForEach(func(file *object.File) error { + var fileInPathSpec bool + + // When no pathspecs are provided, search all the files. + if len(opts.PathSpecs) == 0 { + fileInPathSpec = true + } + + // Check if the file name matches with the pathspec. Break out of the + // loop once a match is found. + for _, pathSpec := range opts.PathSpecs { + if pathSpec != nil && pathSpec.MatchString(file.Name) { + fileInPathSpec = true + break + } + } + + // If the file does not match with any of the pathspec, skip it. + if !fileInPathSpec { + return nil + } + + grepResults, err := findMatchInFile(file, treeName, opts) + if err != nil { + return err + } + results = append(results, grepResults...) 
+ + return nil + }) + + return results, err +} + +// findMatchInFile takes a single File, worktree name and GrepOptions, +// and returns a slice of GrepResult containing the result of regex pattern +// matching in the given file. +func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) { + var grepResults []GrepResult + + content, err := file.Contents() + if err != nil { + return grepResults, err + } + + // Split the file content and parse line-by-line. + contentByLine := strings.Split(content, "\n") + for lineNum, cnt := range contentByLine { + addToResult := false + + // Match the patterns and content. Break out of the loop once a + // match is found. + for _, pattern := range opts.Patterns { + if pattern != nil && pattern.MatchString(cnt) { + // Add to result only if invert match is not enabled. + if !opts.InvertMatch { + addToResult = true + break + } + } else if opts.InvertMatch { + // If matching fails, and invert match is enabled, add to + // results. + addToResult = true + break + } + } + + if addToResult { + grepResults = append(grepResults, GrepResult{ + FileName: file.Name, + LineNumber: lineNum + 1, + Content: cnt, + TreeName: treeName, + }) + } + } + + return grepResults, nil +} + +func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error { + if err := util.RemoveAll(fs, name); err != nil { + return err + } + + dir := filepath.Dir(name) + return doCleanDirectories(fs, dir) +} + +// doCleanDirectories removes empty subdirs (without files) +func doCleanDirectories(fs billy.Filesystem, dir string) error { + files, err := fs.ReadDir(dir) + if err != nil { + return err + } + if len(files) == 0 { + return fs.Remove(dir) + } + return nil +} + +type indexBuilder struct { + entries map[string]*index.Entry +} + +func newIndexBuilder(idx *index.Index) *indexBuilder { + entries := make(map[string]*index.Entry, len(idx.Entries)) + for _, e := range idx.Entries { + entries[e.Name] = e + } + return &indexBuilder{ + entries: entries, + } +} + +func (b *indexBuilder) Write(idx *index.Index) { + idx.Entries = idx.Entries[:0] + for _, e := range b.entries { + idx.Entries = append(idx.Entries, e) + } +} + +func (b *indexBuilder) Add(e *index.Entry) { + b.entries[e.Name] = e +} + +func (b *indexBuilder) Remove(name string) { + delete(b.entries, filepath.ToSlash(name)) +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_bsd.go b/vendor/github.com/go-git/go-git/v5/worktree_bsd.go new file mode 100644 index 0000000000..d4ea327588 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_bsd.go @@ -0,0 +1,26 @@ +// +build darwin freebsd netbsd + +package git + +import ( + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} + +func isSymlinkWindowsNonAdmin(err error) bool { + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go new file mode 100644 index 0000000000..a9d0e0409c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_commit.go @@ -0,0 +1,234 @@ +package git + +import ( + "bytes" + "path" + "sort" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + 
"github.com/go-git/go-git/v5/plumbing/format/index" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/storage" + + "github.com/go-git/go-billy/v5" + "golang.org/x/crypto/openpgp" +) + +// Commit stores the current contents of the index in a new commit along with +// a log message from the user describing the changes. +func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) { + if err := opts.Validate(w.r); err != nil { + return plumbing.ZeroHash, err + } + + if opts.All { + if err := w.autoAddModifiedAndDeleted(); err != nil { + return plumbing.ZeroHash, err + } + } + + idx, err := w.r.Storer.Index() + if err != nil { + return plumbing.ZeroHash, err + } + + h := &buildTreeHelper{ + fs: w.Filesystem, + s: w.r.Storer, + } + + tree, err := h.BuildTree(idx) + if err != nil { + return plumbing.ZeroHash, err + } + + commit, err := w.buildCommitObject(msg, opts, tree) + if err != nil { + return plumbing.ZeroHash, err + } + + return commit, w.updateHEAD(commit) +} + +func (w *Worktree) autoAddModifiedAndDeleted() error { + s, err := w.Status() + if err != nil { + return err + } + + idx, err := w.r.Storer.Index() + if err != nil { + return err + } + + for path, fs := range s { + if fs.Worktree != Modified && fs.Worktree != Deleted { + continue + } + + if _, _, err := w.doAddFile(idx, s, path, nil); err != nil { + return err + } + + } + + return w.r.Storer.SetIndex(idx) +} + +func (w *Worktree) updateHEAD(commit plumbing.Hash) error { + head, err := w.r.Storer.Reference(plumbing.HEAD) + if err != nil { + return err + } + + name := plumbing.HEAD + if head.Type() != plumbing.HashReference { + name = head.Target() + } + + ref := plumbing.NewHashReference(name, commit) + return w.r.Storer.SetReference(ref) +} + +func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) { + commit := &object.Commit{ + Author: *opts.Author, + Committer: *opts.Committer, + Message: msg, + TreeHash: tree, + ParentHashes: opts.Parents, + } + + if opts.SignKey != nil { + sig, err := w.buildCommitSignature(commit, opts.SignKey) + if err != nil { + return plumbing.ZeroHash, err + } + commit.PGPSignature = sig + } + + obj := w.r.Storer.NewEncodedObject() + if err := commit.Encode(obj); err != nil { + return plumbing.ZeroHash, err + } + return w.r.Storer.SetEncodedObject(obj) +} + +func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { + encoded := &plumbing.MemoryObject{} + if err := commit.Encode(encoded); err != nil { + return "", err + } + r, err := encoded.Reader() + if err != nil { + return "", err + } + var b bytes.Buffer + if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { + return "", err + } + return b.String(), nil +} + +// buildTreeHelper converts a given index.Index file into multiple git objects +// reading the blobs from the given filesystem and creating the trees from the +// index structure. The created objects are pushed to a given Storer. +type buildTreeHelper struct { + fs billy.Filesystem + s storage.Storer + + trees map[string]*object.Tree + entries map[string]*object.TreeEntry +} + +// BuildTree builds the tree objects and push its to the storer, the hash +// of the root tree is returned. 
+func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) { + const rootNode = "" + h.trees = map[string]*object.Tree{rootNode: {}} + h.entries = map[string]*object.TreeEntry{} + + for _, e := range idx.Entries { + if err := h.commitIndexEntry(e); err != nil { + return plumbing.ZeroHash, err + } + } + + return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode]) +} + +func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error { + parts := strings.Split(e.Name, "/") + + var fullpath string + for _, part := range parts { + parent := fullpath + fullpath = path.Join(fullpath, part) + + h.doBuildTree(e, parent, fullpath) + } + + return nil +} + +func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) { + if _, ok := h.trees[fullpath]; ok { + return + } + + if _, ok := h.entries[fullpath]; ok { + return + } + + te := object.TreeEntry{Name: path.Base(fullpath)} + + if fullpath == e.Name { + te.Mode = e.Mode + te.Hash = e.Hash + } else { + te.Mode = filemode.Dir + h.trees[fullpath] = &object.Tree{} + } + + h.trees[parent].Entries = append(h.trees[parent].Entries, te) +} + +type sortableEntries []object.TreeEntry + +func (sortableEntries) sortName(te object.TreeEntry) string { + if te.Mode == filemode.Dir { + return te.Name + "/" + } + return te.Name +} +func (se sortableEntries) Len() int { return len(se) } +func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) } +func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] } + +func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) { + sort.Sort(sortableEntries(t.Entries)) + for i, e := range t.Entries { + if e.Mode != filemode.Dir && !e.Hash.IsZero() { + continue + } + + path := path.Join(parent, e.Name) + + var err error + e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path]) + if err != nil { + return plumbing.ZeroHash, err + } + + t.Entries[i] = e + } + + o := h.s.NewEncodedObject() + if err := t.Encode(o); err != nil { + return plumbing.ZeroHash, err + } + + return h.s.SetEncodedObject(o) +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_linux.go b/vendor/github.com/go-git/go-git/v5/worktree_linux.go new file mode 100644 index 0000000000..cf0db25242 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_linux.go @@ -0,0 +1,26 @@ +// +build linux + +package git + +import ( + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} + +func isSymlinkWindowsNonAdmin(err error) bool { + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_plan9.go b/vendor/github.com/go-git/go-git/v5/worktree_plan9.go new file mode 100644 index 0000000000..8cedf71a32 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_plan9.go @@ -0,0 +1,31 @@ +package git + +import ( + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Dir); ok { + // Plan 9 doesn't have a CreatedAt field. + e.CreatedAt = time.Unix(int64(os.Mtime), 0) + + e.Dev = uint32(os.Dev) + + // Plan 9 has no Inode. 
+			// ext2srv(4) appears to store Inode in Qid.Path.
+			e.Inode = uint32(os.Qid.Path)
+
+			// Plan 9 has string UID/GID
+			e.GID = 0
+			e.UID = 0
+		}
+	}
+}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+	return true
+}
diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go
new file mode 100644
index 0000000000..c639f13208
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/worktree_status.go
@@ -0,0 +1,708 @@
+package git
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-git/go-billy/v5/util"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/filemode"
+	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
+	"github.com/go-git/go-git/v5/plumbing/format/index"
+	"github.com/go-git/go-git/v5/plumbing/object"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+	"github.com/go-git/go-git/v5/utils/merkletrie"
+	"github.com/go-git/go-git/v5/utils/merkletrie/filesystem"
+	mindex "github.com/go-git/go-git/v5/utils/merkletrie/index"
+	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
+)
+
+var (
+	// ErrDestinationExists in a Move operation means that the target exists on
+	// the worktree.
+	ErrDestinationExists = errors.New("destination exists")
+	// ErrGlobNoMatches is returned by AddGlob when the glob pattern does not
+	// match any files in the worktree.
+	ErrGlobNoMatches = errors.New("glob pattern did not match any files")
+)
+
+// Status returns the working tree status.
+func (w *Worktree) Status() (Status, error) {
+	var hash plumbing.Hash
+
+	ref, err := w.r.Head()
+	if err != nil && err != plumbing.ErrReferenceNotFound {
+		return nil, err
+	}
+
+	if err == nil {
+		hash = ref.Hash()
+	}
+
+	return w.status(hash)
+}
+
+func (w *Worktree) status(commit plumbing.Hash) (Status, error) {
+	s := make(Status)
+
+	left, err := w.diffCommitWithStaging(commit, false)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, ch := range left {
+		a, err := ch.Action()
+		if err != nil {
+			return nil, err
+		}
+
+		fs := s.File(nameFromAction(&ch))
+		fs.Worktree = Unmodified
+
+		switch a {
+		case merkletrie.Delete:
+			s.File(ch.From.String()).Staging = Deleted
+		case merkletrie.Insert:
+			s.File(ch.To.String()).Staging = Added
+		case merkletrie.Modify:
+			s.File(ch.To.String()).Staging = Modified
+		}
+	}
+
+	right, err := w.diffStagingWithWorktree(false)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, ch := range right {
+		a, err := ch.Action()
+		if err != nil {
+			return nil, err
+		}
+
+		fs := s.File(nameFromAction(&ch))
+		if fs.Staging == Untracked {
+			fs.Staging = Unmodified
+		}
+
+		switch a {
+		case merkletrie.Delete:
+			fs.Worktree = Deleted
+		case merkletrie.Insert:
+			fs.Worktree = Untracked
+			fs.Staging = Untracked
+		case merkletrie.Modify:
+			fs.Worktree = Modified
+		}
+	}
+
+	return s, nil
+}
+
+func nameFromAction(ch *merkletrie.Change) string {
+	name := ch.To.String()
+	if name == "" {
+		return ch.From.String()
+	}
+
+	return name
+}
+
+func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) {
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return nil, err
+	}
+
+	from := mindex.NewRootNode(idx)
+	submodules, err := w.getSubmodulesStatus()
+	if err != nil {
+		return nil, err
+	}
+
+	to := filesystem.NewRootNode(w.Filesystem, submodules)
+
+	var c merkletrie.Changes
+	if reverse {
+		c, err = merkletrie.DiffTree(to, from, diffTreeIsEquals)
+	} else {
+		c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return w.excludeIgnoredChanges(c), nil
+}
+
+func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes {
+	patterns, err := gitignore.ReadPatterns(w.Filesystem, nil)
+	if err != nil {
+		return changes
+	}
+
+	patterns = append(patterns, w.Excludes...)
+
+	if len(patterns) == 0 {
+		return changes
+	}
+
+	m := gitignore.NewMatcher(patterns)
+
+	var res merkletrie.Changes
+	for _, ch := range changes {
+		var path []string
+		for _, n := range ch.To {
+			path = append(path, n.Name())
+		}
+		if len(path) == 0 {
+			for _, n := range ch.From {
+				path = append(path, n.Name())
+			}
+		}
+		if len(path) != 0 {
+			isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir())
+			if m.Match(path, isDir) {
+				continue
+			}
+		}
+		res = append(res, ch)
+	}
+	return res
+}
+
+func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) {
+	o := map[string]plumbing.Hash{}
+
+	sub, err := w.Submodules()
+	if err != nil {
+		return nil, err
+	}
+
+	status, err := sub.Status()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, s := range status {
+		if s.Current.IsZero() {
+			o[s.Path] = s.Expected
+			continue
+		}
+
+		o[s.Path] = s.Current
+	}
+
+	return o, nil
+}
+
+func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) {
+	var t *object.Tree
+	if !commit.IsZero() {
+		c, err := w.r.CommitObject(commit)
+		if err != nil {
+			return nil, err
+		}
+
+		t, err = c.Tree()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return w.diffTreeWithStaging(t, reverse)
+}
+
+func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) {
+	var from noder.Noder
+	if t != nil {
+		from = object.NewTreeRootNode(t)
+	}
+
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return nil, err
+	}
+
+	to := mindex.NewRootNode(idx)
+
+	if reverse {
+		return merkletrie.DiffTree(to, from, diffTreeIsEquals)
+	}
+
+	return merkletrie.DiffTree(from, to, diffTreeIsEquals)
+}
+
+var emptyNoderHash = make([]byte, 24)
+
+// diffTreeIsEquals is an implementation of noder.Equals, used to compare
+// noder.Noder instances; it compares the content and the length of the hashes.
+//
+// Since some of the noder.Noder implementations don't compute a hash for
+// some directories, if either of the hashes is a 24-byte slice of zero values
+// the comparison is not done and the hashes are taken as different.
+func diffTreeIsEquals(a, b noder.Hasher) bool {
+	hashA := a.Hash()
+	hashB := b.Hash()
+
+	if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) {
+		return false
+	}
+
+	return bytes.Equal(hashA, hashB)
+}
+
+// Add adds the file contents of a file in the worktree to the index. If the
+// file is already staged in the index no error is returned. If a file deleted
+// from the Workspace is given, the file is removed from the index. If a
+// directory is given, it adds the files and all its sub-directories recursively
+// in the worktree to the index. If any of the files is already staged in the
+// index no error is returned. When path is a file, the blob.Hash is returned.
+func (w *Worktree) Add(path string) (plumbing.Hash, error) {
+	// TODO(mcuadros): deprecate in favor of AddWithOption in v6.
+	return w.doAdd(path, make([]gitignore.Pattern, 0))
+}
+
+func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) {
+	files, err := w.Filesystem.ReadDir(directory)
+	if err != nil {
+		return false, err
+	}
+	if len(ignorePattern) > 0 {
+		m := gitignore.NewMatcher(ignorePattern)
+		matchPath := strings.Split(directory, string(os.PathSeparator))
+		if m.Match(matchPath, true) {
+			// ignore
+			return false, nil
+		}
+	}
+
+	for _, file := range files {
+		name := path.Join(directory, file.Name())
+
+		var a bool
+		if file.IsDir() {
+			if file.Name() == GitDirName {
+				// ignore special git directory
+				continue
+			}
+			a, err = w.doAddDirectory(idx, s, name, ignorePattern)
+		} else {
+			a, _, err = w.doAddFile(idx, s, name, ignorePattern)
+		}
+
+		if err != nil {
+			return
+		}
+
+		if !added && a {
+			added = true
+		}
+	}
+
+	return
+}
+
+// AddWithOptions adds file contents to the index, updates the index using the
+// current content found in the working tree, to prepare the content staged for
+// the next commit.
+//
+// It typically adds the current content of existing paths as a whole, but with
+// some options it can also be used to add content with only part of the changes
+// made to the working tree files applied, or remove paths that do not exist in
+// the working tree anymore.
+func (w *Worktree) AddWithOptions(opts *AddOptions) error {
+	if err := opts.Validate(w.r); err != nil {
+		return err
+	}
+
+	if opts.All {
+		_, err := w.doAdd(".", w.Excludes)
+		return err
+	}
+
+	if opts.Glob != "" {
+		return w.AddGlob(opts.Glob)
+	}
+
+	_, err := w.Add(opts.Path)
+	return err
+}
+
+func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) {
+	s, err := w.Status()
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	var h plumbing.Hash
+	var added bool
+
+	fi, err := w.Filesystem.Lstat(path)
+	if err != nil || !fi.IsDir() {
+		added, h, err = w.doAddFile(idx, s, path, ignorePattern)
+	} else {
+		added, err = w.doAddDirectory(idx, s, path, ignorePattern)
+	}
+
+	if err != nil {
+		return h, err
+	}
+
+	if !added {
+		return h, nil
+	}
+
+	return h, w.r.Storer.SetIndex(idx)
+}
+
+// AddGlob adds all paths, matching pattern, to the index. If pattern matches a
+// directory path, all directory contents are added to the index recursively. No
+// error is returned if all matching paths are already staged in the index.
+func (w *Worktree) AddGlob(pattern string) error {
+	// TODO(mcuadros): deprecate in favor of AddWithOption in v6.
+	files, err := util.Glob(w.Filesystem, pattern)
+	if err != nil {
+		return err
+	}
+
+	if len(files) == 0 {
+		return ErrGlobNoMatches
+	}
+
+	s, err := w.Status()
+	if err != nil {
+		return err
+	}
+
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return err
+	}
+
+	var saveIndex bool
+	for _, file := range files {
+		fi, err := w.Filesystem.Lstat(file)
+		if err != nil {
+			return err
+		}
+
+		var added bool
+		if fi.IsDir() {
+			added, err = w.doAddDirectory(idx, s, file, make([]gitignore.Pattern, 0))
+		} else {
+			added, _, err = w.doAddFile(idx, s, file, make([]gitignore.Pattern, 0))
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if !saveIndex && added {
+			saveIndex = true
+		}
+	}
+
+	if saveIndex {
+		return w.r.Storer.SetIndex(idx)
+	}
+
+	return nil
+}
+
+// doAddFile creates a new blob from path and updates the index; added is true
+// if the added file is different from the index.
+func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) {
+	if s.File(path).Worktree == Unmodified {
+		return false, h, nil
+	}
+	if len(ignorePattern) > 0 {
+		m := gitignore.NewMatcher(ignorePattern)
+		matchPath := strings.Split(path, string(os.PathSeparator))
+		if m.Match(matchPath, true) {
+			// ignore
+			return false, h, nil
+		}
+	}
+
+	h, err = w.copyFileToStorage(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			added = true
+			h, err = w.deleteFromIndex(idx, path)
+		}
+
+		return
+	}
+
+	if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil {
+		return false, h, err
+	}
+
+	return true, h, err
+}
+
+func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) {
+	fi, err := w.Filesystem.Lstat(path)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	obj := w.r.Storer.NewEncodedObject()
+	obj.SetType(plumbing.BlobObject)
+	obj.SetSize(fi.Size())
+
+	writer, err := obj.Writer()
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	defer ioutil.CheckClose(writer, &err)
+
+	if fi.Mode()&os.ModeSymlink != 0 {
+		err = w.fillEncodedObjectFromSymlink(writer, path, fi)
+	} else {
+		err = w.fillEncodedObjectFromFile(writer, path, fi)
+	}
+
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return w.r.Storer.SetEncodedObject(obj)
+}
+
+func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) {
+	src, err := w.Filesystem.Open(path)
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(src, &err)
+
+	if _, err := io.Copy(dst, src); err != nil {
+		return err
+	}
+
+	return err
+}
+
+func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error {
+	target, err := w.Filesystem.Readlink(path)
+	if err != nil {
+		return err
+	}
+
+	_, err = dst.Write([]byte(target))
+	return err
+}
+
+func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
+	e, err := idx.Entry(filename)
+	if err != nil && err != index.ErrEntryNotFound {
+		return err
+	}
+
+	if err == index.ErrEntryNotFound {
+		return w.doAddFileToIndex(idx, filename, h)
+	}
+
+	return w.doUpdateFileToIndex(e, filename, h)
+}
+
+func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error {
+	return w.doUpdateFileToIndex(idx.Add(filename), filename, h)
+}
+
+func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error {
+	info, err := w.Filesystem.Lstat(filename)
+	if err != nil {
+		return err
+	}
+
+	e.Hash = h
+	e.ModifiedAt = info.ModTime()
+	e.Mode, err = filemode.NewFromOSFileMode(info.Mode())
+	if err != nil {
+		return err
+	}
+
+	if e.Mode.IsRegular() {
+		e.Size = uint32(info.Size())
+	}
+
+	fillSystemInfo(e, info.Sys())
+	return nil
+}
+
+// Remove removes files from the working tree and from the index.
+func (w *Worktree) Remove(path string) (plumbing.Hash, error) {
+	// TODO(mcuadros): remove plumbing.Hash from signature at v5.
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	var h plumbing.Hash
+
+	fi, err := w.Filesystem.Lstat(path)
+	if err != nil || !fi.IsDir() {
+		h, err = w.doRemoveFile(idx, path)
+	} else {
+		_, err = w.doRemoveDirectory(idx, path)
+	}
+	if err != nil {
+		return h, err
+	}
+
+	return h, w.r.Storer.SetIndex(idx)
+}
+
+func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) {
+	files, err := w.Filesystem.ReadDir(directory)
+	if err != nil {
+		return false, err
+	}
+
+	for _, file := range files {
+		name := path.Join(directory, file.Name())
+
+		var r bool
+		if file.IsDir() {
+			r, err = w.doRemoveDirectory(idx, name)
+		} else {
+			_, err = w.doRemoveFile(idx, name)
+			if err == index.ErrEntryNotFound {
+				err = nil
+			}
+		}
+
+		if err != nil {
+			return
+		}
+
+		if !removed && r {
+			removed = true
+		}
+	}
+
+	err = w.removeEmptyDirectory(directory)
+	return
+}
+
+func (w *Worktree) removeEmptyDirectory(path string) error {
+	files, err := w.Filesystem.ReadDir(path)
+	if err != nil {
+		return err
+	}
+
+	if len(files) != 0 {
+		return nil
+	}
+
+	return w.Filesystem.Remove(path)
+}
+
+func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) {
+	hash, err := w.deleteFromIndex(idx, path)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return hash, w.deleteFromFilesystem(path)
+}
+
+func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) {
+	e, err := idx.Remove(path)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return e.Hash, nil
+}
+
+func (w *Worktree) deleteFromFilesystem(path string) error {
+	err := w.Filesystem.Remove(path)
+	if os.IsNotExist(err) {
+		return nil
+	}
+
+	return err
+}
+
+// RemoveGlob removes all paths, matching pattern, from the index. If pattern
+// matches a directory path, all directory contents are removed from the index
+// recursively.
+func (w *Worktree) RemoveGlob(pattern string) error {
+	idx, err := w.r.Storer.Index()
+	if err != nil {
+		return err
+	}
+
+	entries, err := idx.Glob(pattern)
+	if err != nil {
+		return err
+	}
+
+	for _, e := range entries {
+		file := filepath.FromSlash(e.Name)
+		if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) {
+			return err
+		}
+
+		if _, err := w.doRemoveFile(idx, file); err != nil {
+			return err
+		}
+
+		dir, _ := filepath.Split(file)
+		if err := w.removeEmptyDirectory(dir); err != nil {
+			return err
+		}
+	}
+
+	return w.r.Storer.SetIndex(idx)
+}
+
+// Move moves or renames a file in the worktree and the index; directories are
+// not supported.
+func (w *Worktree) Move(from, to string) (plumbing.Hash, error) { + // TODO(mcuadros): support directories and/or implement support for glob + if _, err := w.Filesystem.Lstat(from); err != nil { + return plumbing.ZeroHash, err + } + + if _, err := w.Filesystem.Lstat(to); err == nil { + return plumbing.ZeroHash, ErrDestinationExists + } + + idx, err := w.r.Storer.Index() + if err != nil { + return plumbing.ZeroHash, err + } + + hash, err := w.deleteFromIndex(idx, from) + if err != nil { + return plumbing.ZeroHash, err + } + + if err := w.Filesystem.Rename(from, to); err != nil { + return hash, err + } + + if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil { + return hash, err + } + + return hash, w.r.Storer.SetIndex(idx) +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go b/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go new file mode 100644 index 0000000000..f45966be96 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go @@ -0,0 +1,26 @@ +// +build openbsd dragonfly solaris + +package git + +import ( + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} + +func isSymlinkWindowsNonAdmin(err error) bool { + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_windows.go b/vendor/github.com/go-git/go-git/v5/worktree_windows.go new file mode 100644 index 0000000000..1928f9712e --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/worktree_windows.go @@ -0,0 +1,35 @@ +// +build windows + +package git + +import ( + "os" + "syscall" + "time" + + "github.com/go-git/go-git/v5/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Win32FileAttributeData); ok { + seconds := os.CreationTime.Nanoseconds() / 1000000000 + nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000 + e.CreatedAt = time.Unix(seconds, nanoseconds) + } + } +} + +func isSymlinkWindowsNonAdmin(err error) bool { + const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + + if err != nil { + if errLink, ok := err.(*os.LinkError); ok { + if errNo, ok := errLink.Err.(syscall.Errno); ok { + return errNo == ERROR_PRIVILEGE_NOT_HELD + } + } + } + + return false +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 0000000000..94ff801df1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 0000000000..c356960046 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. 
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 0000000000..5d37e294c5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 0000000000..ab59311813 --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,282 @@ +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. 
It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. 
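+
+As a rough sketch of how this looks in practice, assuming the `logger` from
+the usage example above and made-up application names (`workerCount`,
+`queueName`, `pollInterval`, `doWork`):
+
+```
+  // Verbosity 0 is for messages you always want to see.
+  logger.Info("starting worker", "workers", workerCount)
+
+  // Chattier detail sits behind a higher V-level; it is only emitted when
+  // the chosen implementation was configured with verbosity >= 4.
+  logger.V(4).Info("polling queue", "queue", queueName, "interval", pollInterval)
+
+  // Errors go through a separate method and carry the error value itself.
+  if err := doWork(); err != nil {
+      logger.Error(err, "work item failed", "queue", queueName)
+  }
+```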
+
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+  key-value pairs, it's much easier to query your structured logs for
+  particular values by filtering on the contents of a particular key --
+  think searching request logs for error codes, Kubernetes reconcilers for
+  the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+  Similarly to searchability, if you maintain conventions around your
+  keys, it becomes easy to gather all log lines related to a particular
+  concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+  structure to your logs, you've got more precise control over how much
+  information is logged -- you might choose in a particular configuration
+  to log certain keys but not others, only log lines where a certain key
+  matches a certain value, etc., instead of just having v-levels and names
+  to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+  objects.) Structured logs allow you to preserve that structure when
+  outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+  regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+  a string.
+ +- They're not cross-referenceable. + +- They don't compress easily, since the message is not constant. + +(Unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys.) + +### Practical + +#### Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +#### What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` method to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +#### But I really want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. + +2. For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair. + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +#### How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack." + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs.) + +#### How do I choose my keys? + +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. + +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. 
Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 0000000000..9d92a38f1d --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return Logger{ + level: 0, + sink: discardLogSink{}, + } +} + +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} + +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { +} + +func (l discardLogSink) Enabled(int) bool { + return false +} + +func (l discardLogSink) Info(int, string, ...interface{}) { +} + +func (l discardLogSink) Error(error, string, ...interface{}) { +} + +func (l discardLogSink) WithValues(...interface{}) LogSink { + return l +} + +func (l discardLogSink) WithName(string) LogSink { + return l +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000000..7accdb0c40 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. 
The kvList argument follows logr + // conventions - each pair of slice elements consists of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+ buf.WriteByte(' ') + } + } + + if escapeKeys { + buf.WriteString(prettyString(k)) + } else { + // this is faster + buf.WriteByte('"') + buf.WriteString(k) + buf.WriteByte('"') + } + if f.outputFormat == outputJSON { + buf.WriteByte(':') + } else { + buf.WriteByte('=') + } + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) pretty(value interface{}) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `"<max-log-depth-exceeded>"` + } + + // Handle types that take full control of logging. + if v, ok := value.(logr.Marshaler); ok { + // Replace the value with what the type wants to get logged. + // That then gets handled below via reflection. + value = invokeMarshaler(v) + } + + // Handle types that want to format themselves. + switch v := value.(type) { + case fmt.Stringer: + value = invokeStringer(v) + case error: + value = invokeError(v) + } + + // Handling the most common types without reflect is a small perf win. + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` +
strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. + continue + } + name := "" + omitempty := false + if tag, found := fld.Tag.Lookup("json"); found { + if tag == "-" { + continue + } + if comma := strings.Index(tag, ","); comma != -1 { + if n := tag[:comma]; n != "" { + name = n + } + rest := tag[comma:] + if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { + omitempty = true + } + } else { + name = tag + } + } + if omitempty && isEmpty(v.Field(i)) { + continue + } + if i > 0 { + buf.WriteByte(',') + } + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue + } + if name == "" { + name = fld.Name + } + // field names can't contain characters which need escaping + buf.WriteByte('"') + buf.WriteString(name) + buf.WriteByte('"') + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + case reflect.Slice, reflect.Array: + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + e := v.Index(i) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) + } + buf.WriteByte(']') + return buf.String() + case reflect.Map: + buf.WriteByte('{') + // This does not sort the map keys, for best perf. + it := v.MapRange() + i := 0 + for it.Next() { + if i > 0 { + buf.WriteByte(',') + } + // If a map key supports TextMarshaler, use it. + keystr := "" + if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { + txt, err := m.MarshalText() + if err != nil { + keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error()) + } else { + keystr = string(txt) + } + keystr = prettyString(keystr) + } else { + // prettyWithFlags will produce already-escaped values + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) + if t.Key().Kind() != reflect.String { + // JSON only does string keys. Unlike Go's standard JSON, we'll + // convert just about anything to a string. + keystr = prettyString(keystr) + } + } + buf.WriteString(keystr) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) + i++ + } + buf.WriteByte('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return "null" + } + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) + } + return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String()) +} + +func prettyString(s string) string { + // Avoid escaping (which does allocations) if we can. + if needsEscape(s) { + return strconv.Quote(s) + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + b.WriteByte('"') + b.WriteString(s) + b.WriteByte('"') + return b.String() +} + +// needsEscape determines whether the input string needs to be escaped or not, +// without doing any allocations.
+func needsEscape(s string) bool { + for _, r := range s { + if !strconv.IsPrint(r) || r == '\\' || r == '"' { + return true + } + } + return false +} + +func isEmpty(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func invokeMarshaler(m logr.Marshaler) (ret interface{}) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return m.MarshalLog() +} + +func invokeStringer(s fmt.Stringer) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return s.String() +} + +func invokeError(e error) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return e.Error() +} + +// Caller represents the original call site for a log line, after considering +// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and +// Line fields will always be provided, while the Func field is optional. +// Users can set the render hook fields in Options to examine logged key-value +// pairs, one of which will be {"caller", Caller} if the Options.LogCaller +// field is enabled for the given MessageClass. +type Caller struct { + // File is the basename of the file for this call site. + File string `json:"file"` + // Line is the line number in the file for this call site. + Line int `json:"line"` + // Func is the function name for this call site, or empty if + // Options.LogCallerFunc is not enabled. + Func string `json:"function,omitempty"` +} + +func (f Formatter) caller() Caller { + // +1 for this frame, +1 for Info/Error. + pc, file, line, ok := runtime.Caller(f.depth + 2) + if !ok { + return Caller{"", 0, ""} + } + fn := "" + if f.opts.LogCallerFunc { + if fp := runtime.FuncForPC(pc); fp != nil { + fn = fp.Name() + } + } + + return Caller{filepath.Base(file), line, fn} +} + +const noValue = "<no-value>" + +func (f Formatter) nonStringKey(v interface{}) string { + return fmt.Sprintf("<non-string-key: %s>", f.snippet(v)) +} + +// snippet produces a short snippet string of an arbitrary value. +func (f Formatter) snippet(v interface{}) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). +func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged.
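+// For example (an illustrative reading, not upstream text): with +// Options{Verbosity: 2}, a message logged via logger.V(2).Info is emitted, +// while one logged via logger.V(3).Info is discarded.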
+func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddName), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddName), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. +func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 0000000000..c3b56b3d2c --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,510 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging + +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. +// +// Usage +// +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". +// +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) +// +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) +// +// Errors are much the same. Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. +// +// Verbosity +// +// Often we want to log information only when the application is in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. +// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: +// +// logger.WithName("compactor").Info("started", "time", time.Now()) +// +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc).
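+// +// As an illustrative sketch (not part of the upstream docs), a named logger +// might be handed to a subsystem's constructor and reused for every message +// that subsystem emits: +// +// c := NewCompactor(logger.WithName("compactor")) // hypothetical constructor +// c.Run() // messages logged inside Run carry the "compactor" name segment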
+// +// Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) +// +// Best Practices +// +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. Variable information can then be attached using +// named values. +// +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. +// +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// +// Key Naming Conventions +// +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// * be human-readable and meaningful (not auto-generated or simple ordinals) +// * be constant (not dependent on input data) +// * contain only printable characters +// * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. +// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: +// * "caller": the calling information (file/line) of a particular log line +// * "error": the underlying error value in the `Error` method +// * "level": the log level +// * "logger": the name of the associated logger +// * "msg": the log message +// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// * "ts": the timestamp for a log line +// +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). +// +// Break Glass +// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// // Underlier exposes access to the underlying logging implementation. 
+// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of a way to test type conversion. +// type Underlier interface { +// GetUnderlying() <underlying-type> +// } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. +package logr + +import ( + "context" +) + +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger +} + +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} + +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...)
+ } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentation for more +// information). The log message will always be emitted, regardless of +// verbosity level. +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} + +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls to WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l +} + +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementations that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. +func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function.
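+// +// A minimal sketch of the intended calling pattern (illustrative, not +// upstream text): +// +// func logWithPrefix(log logr.Logger, msg string) { +// markHelper, log := log.WithCallStackHelper() +// markHelper() // mark logWithPrefix as a helper for call-site attribution +// log.Info(msg) +// }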
+// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. +type contextKey struct{} + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. 
+ WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) LogSink +} + +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. +// +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. +// +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as a helper function when logging + // call site information. + GetCallStackHelper() func() +} + +// Marshaler is an optional interface that logged values may choose to +// implement. Loggers with structured output, such as JSON, should +// log the object returned by the MarshalLog method instead of the +// original value. +type Marshaler interface { + // MarshalLog can be used to: + // - ensure that structs are not logged as strings when the original + // value has a String method: return a different type without a + // String method + // - select which fields of a complex type should get logged: + // return a simpler struct with fewer fields + // - log unexported fields: return a different struct + // with exported fields + // + // It may return any value of any type.
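+ // + // A sketch of a typical implementation (illustrative only): + // func (s secret) MarshalLog() interface{} { + // return struct{ Name string }{s.name} // expose only safe fields + // }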
+ MarshalLog() interface{} +} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE similarity index 100% rename from vendor/go.opentelemetry.io/otel/metric/LICENSE rename to vendor/github.com/go-logr/stdr/LICENSE diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md new file mode 100644 index 0000000000..5158667890 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/README.md @@ -0,0 +1,6 @@ +# Minimal Go logging using logr and Go's standard library + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) + +This package implements the [logr interface](https://github.com/go-logr/logr) +in terms of Go's standard [log package](https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go new file mode 100644 index 0000000000..93a8aab51b --- /dev/null +++ b/vendor/github.com/go-logr/stdr/stdr.go @@ -0,0 +1,170 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package stdr implements github.com/go-logr/logr.Logger in terms of +// Go's standard log package. +package stdr + +import ( + "log" + "os" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" +) + +// The global verbosity level. See SetVerbosity(). +var globalVerbosity int + +// SetVerbosity sets the global level against which all info logs will be +// compared. If this is greater than or equal to the "V" of the logger, the +// message will be logged. A higher value here means more logs will be written. +// The previous verbosity value is returned. This is not concurrent-safe - +// callers must be sure to call it from only one goroutine. +func SetVerbosity(v int) int { + old := globalVerbosity + globalVerbosity = v + return old +} + +// New returns a logr.Logger which is implemented by Go's standard log package, +// or something like it. If std is nil, this will use a default logger +// instead. +// +// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) +func New(std StdLogger) logr.Logger { + return NewWithOptions(std, Options{}) +} + +// NewWithOptions returns a logr.Logger which is implemented by Go's standard +// log package, or something like it. See New for details. +func NewWithOptions(std StdLogger, opts Options) logr.Logger { + if std == nil { + // Go's log.Default() is only available in 1.16 and higher. + std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller.
+ // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. + Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of a way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. +func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore new file mode 100644 index 0000000000..b4ae623be5 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.gitignore @@ -0,0 +1,8 @@ +glob.iml +.idea +*.cpu +*.mem +*.test +*.dot +*.png +*.svg diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml new file mode 100644 index 0000000000..e8a276826c --- /dev/null +++ b/vendor/github.com/gobwas/glob/.travis.yml @@ -0,0 +1,9 @@ +sudo: false + +language: go + +go: + - 1.5.3 + +script: + - go test -v ./...
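Before moving on to the vendored gobwas/glob sources, here is a minimal sketch of how the logr/stdr pieces above fit together (illustrative only; the logger name, flag values, and verbosity level are arbitrary choices, not part of this change):

	package main

	import (
		"log"
		"os"

		"github.com/go-logr/stdr"
	)

	func main() {
		stdr.SetVerbosity(1) // let V(0) and V(1) info logs through the global filter

		// Wrap a standard-library logger in a logr.Logger.
		logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
		logger = logger.WithName("example").WithValues("pid", os.Getpid())

		logger.Info("starting")          // emitted: V(0) <= verbosity
		logger.V(2).Info("noisy detail") // discarded: 2 > verbosity
		logger.Error(nil, "went wrong")  // errors ignore verbosity
	}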
diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE new file mode 100644 index 0000000000..9d4735cad9 --- /dev/null +++ b/vendor/github.com/gobwas/glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Sergey Kamardin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh new file mode 100644 index 0000000000..804cf22e64 --- /dev/null +++ b/vendor/github.com/gobwas/glob/bench.sh @@ -0,0 +1,26 @@ +#! /bin/bash + +bench() { + filename="/tmp/$1-$2.bench" + if test -e "${filename}"; + then + echo "Already exists ${filename}" + else + backup=`git rev-parse --abbrev-ref HEAD` + git checkout $1 + echo -n "Creating ${filename}... " + go test ./... 
-run=NONE -bench=$2 > "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 0000000000..02e7de80a0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { + return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return m + } + if m := glueMatchersAsRow(matchers); m 
!= nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) +} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) 
+ } + + if len(next) == len(matchers) { + return next + } + + return minimizeMatchers(next) +} + +// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree +func minimizeTree(tree *ast.Node) *ast.Node { + switch tree.Kind { + case ast.KindAnyOf: + return minimizeTreeAnyOf(tree) + default: + return nil + } +} + +// minimizeTreeAnyOf tries to find common children of the given node of an AnyOf pattern. +// It searches for common children from the left and from the right; +// if any common children are found, it returns a new, optimized AST tree, +// otherwise it returns nil. +func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { + if !areOfSameKind(tree.Children, ast.KindPattern) { + return nil + } + + commonLeft, commonRight := commonChildren(tree.Children) + commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) + if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts + return nil + } + + var result []*ast.Node + if commonLeftCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) + } + + var anyOf []*ast.Node + for _, child := range tree.Children { + reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] + var node *ast.Node + if len(reuse) == 0 { + // this pattern is completely reduced by the commonLeft and commonRight patterns, + // so it becomes nothing + node = ast.NewNode(ast.KindNothing, nil) + } else { + node = ast.NewNode(ast.KindPattern, nil, reuse...) + } + anyOf = appendIfUnique(anyOf, node) + } + switch { + case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: + result = append(result, anyOf[0]) + case len(anyOf) > 1: + result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) + } + + if commonRightCount > 0 { + result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) + } + + return ast.NewNode(ast.KindPattern, nil, result...)
+} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find the node that has the fewest children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate the maximum possible size for the commonRight slice + // so that elements can be inserted in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // used later to slice the result as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip the node with the fewest children + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // stop searching for common right parts once the left part already overlaps + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, 
fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 0000000000..2afde343af --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+func QuoteMeta(s string) string { + b := make([]byte, 2*len(s)) + + // a byte loop is correct because all meta characters are ASCII + j := 0 + for i := 0; i < len(s); i++ { + if syntax.Special(s[i]) { + b[j] = '\\' + j++ + } + b[j] = s[i] + j++ + } + + return string(b[0:j]) +} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go new file mode 100644 index 0000000000..514a9a5c45 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/strings" +) + +type Any struct { + Separators []rune +} + +func NewAny(s []rune) Any { + return Any{s} +} + +func (self Any) Match(s string) bool { + return strings.IndexAnyRunes(s, self.Separators) == -1 +} + +func (self Any) Index(s string) (int, []int) { + found := strings.IndexAnyRunes(s, self.Separators) + switch found { + case -1: + case 0: + return 0, segments0 + default: + s = s[:found] + } + + segments := acquireSegments(len(s)) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Any) Len() int { + return lenNo +} + +func (self Any) String() string { + return fmt.Sprintf("<any:![%s]>", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go new file mode 100644 index 0000000000..8e65356cdc --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/any_of.go @@ -0,0 +1,82 @@ +package match + +import "fmt" + +type AnyOf struct { + Matchers Matchers +} + +func NewAnyOf(m ...Matcher) AnyOf { + return AnyOf{Matchers(m)} +} + +func (self *AnyOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self AnyOf) Match(s string) bool { + for _, m := range self.Matchers { + if m.Match(s) { + return true + } + } + + return false +} + +func (self AnyOf) Index(s string) (int, []int) { + index := -1 + + segments := acquireSegments(len(s)) + for _, m := range self.Matchers { + idx, seg := m.Index(s) + if idx == -1 { + continue + } + + if index == -1 || idx < index { + index = idx + segments = append(segments[:0], seg...) 
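+ // a strictly earlier match wins outright: its segments replace whatever was collected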
+ continue + } + + if idx > index { + continue + } + + // here idx == index + segments = appendMerge(segments, seg) + } + + if index == -1 { + releaseSegments(segments) + return -1, nil + } + + return index, segments +} + +func (self AnyOf) Len() (l int) { + l = -1 + for _, m := range self.Matchers { + ml := m.Len() + switch { + case l == -1: + l = ml + continue + + case ml == -1: + return -1 + + case l != ml: + return -1 + } + } + + return +} + +func (self AnyOf) String() string { + return fmt.Sprintf("<any_of:[%s]>", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go new file mode 100644 index 0000000000..a8130e93ea --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/btree.go @@ -0,0 +1,146 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type BTree struct { + Value Matcher + Left Matcher + Right Matcher + ValueLengthRunes int + LeftLengthRunes int + RightLengthRunes int + LengthRunes int +} + +func NewBTree(Value, Left, Right Matcher) (tree BTree) { + tree.Value = Value + tree.Left = Left + tree.Right = Right + + lenOk := true + if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { + lenOk = false + } + + if Left != nil { + if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { + lenOk = false + } + } + + if Right != nil { + if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { + lenOk = false + } + } + + if lenOk { + tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes + } else { + tree.LengthRunes = -1 + } + + return tree +} + +func (self BTree) Len() int { + return self.LengthRunes +} + +// todo? +func (self BTree) Index(s string) (int, []int) { + return -1, nil +} + +func (self BTree) Match(s string) bool { + inputLen := len(s) + + // self.LengthRunes, self.LeftLengthRunes and self.RightLengthRunes hold the length in runes of each part + // here we manipulate byte lengths for better optimization + // but these checks still hold, since the minimum length of a 1-rune string is 1 byte. 
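+ // (a match that needs N runes can never succeed on an input shorter than N bytes)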
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen { + return false + } + + // try to cut off unnecessary parts + // using the known lengths of the right and left parts + var offset, limit int + if self.LeftLengthRunes >= 0 { + offset = self.LeftLengthRunes + } + if self.RightLengthRunes >= 0 { + limit = inputLen - self.RightLengthRunes + } else { + limit = inputLen + } + + for offset < limit { + // search for a matching part in the substring + index, segments := self.Value.Index(s[offset:limit]) + if index == -1 { + releaseSegments(segments) + return false + } + + l := s[:offset+index] + var left bool + if self.Left != nil { + left = self.Left.Match(l) + } else { + left = l == "" + } + + if left { + for i := len(segments) - 1; i >= 0; i-- { + length := segments[i] + + var right bool + var r string + // if there is no string for the right branch + if inputLen <= offset+index+length { + r = "" + } else { + r = s[offset+index+length:] + } + + if self.Right != nil { + right = self.Right.Match(r) + } else { + right = r == "" + } + + if right { + releaseSegments(segments) + return true + } + } + } + + _, step := utf8.DecodeRuneInString(s[offset+index:]) + offset += index + step + + releaseSegments(segments) + } + + return false +} + +func (self BTree) String() string { + const n string = "<nil>" + var l, r string + if self.Left == nil { + l = n + } else { + l = self.Left.String() + } + if self.Right == nil { + r = n + } else { + r = self.Right.String() + } + + return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r) +} diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go new file mode 100644 index 0000000000..0998e95b0e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/contains.go @@ -0,0 +1,58 @@ +package match + +import ( + "fmt" + "strings" +) + +type Contains struct { + Needle string + Not bool +} + +func NewContains(needle string, not bool) Contains { + return Contains{needle, not} +} + +func (self Contains) Match(s string) bool { + return strings.Contains(s, self.Needle) != self.Not +} + +func (self Contains) Index(s string) (int, []int) { + var offset int + + idx := strings.Index(s, self.Needle) + + if !self.Not { + if idx == -1 { + return -1, nil + } + + offset = idx + len(self.Needle) + if len(s) <= offset { + return 0, []int{offset} + } + s = s[offset:] + } else if idx != -1 { + s = s[:idx] + } + + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, offset+i) + } + + return 0, append(segments, offset+len(s)) +} + +func (self Contains) Len() int { + return lenNo +} + +func (self Contains) String() string { + var not string + if self.Not { + not = "!" 
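+ // "!" marks a negated matcher in this debug representation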
+ } + return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle) +} diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go new file mode 100644 index 0000000000..7c968ee368 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/every_of.go @@ -0,0 +1,99 @@ +package match + +import ( + "fmt" +) + +type EveryOf struct { + Matchers Matchers +} + +func NewEveryOf(m ...Matcher) EveryOf { + return EveryOf{Matchers(m)} +} + +func (self *EveryOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self EveryOf) Len() (l int) { + for _, m := range self.Matchers { + if ml := m.Len(); l > 0 { + l += ml + } else { + return -1 + } + } + + return +} + +func (self EveryOf) Index(s string) (int, []int) { + var index int + var offset int + + // allocate the buffers with cap len(s), + // since that is the maximum size of the output segment values + next := acquireSegments(len(s)) + current := acquireSegments(len(s)) + + sub := s + for i, m := range self.Matchers { + idx, seg := m.Index(sub) + if idx == -1 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + if i == 0 { + // copy here instead of assigning `current = seg`, + // because seg is a slice from a reusable buffer + // and could be overwritten in the next iteration + current = append(current, seg...) + } else { + // clear next before rebuilding the intersection + next = next[:0] + + delta := index - (idx + offset) + for _, ex := range current { + for _, n := range seg { + if ex+delta == n { + next = append(next, n) + } + } + } + + if len(next) == 0 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + current = append(current[:0], next...) + } + + index = idx + offset + sub = s[index:] + offset += idx + } + + releaseSegments(next) + + return index, current +} + +func (self EveryOf) Match(s string) bool { + for _, m := range self.Matchers { + if !m.Match(s) { + return false + } + } + + return true +} + +func (self EveryOf) String() string { + return fmt.Sprintf("<every_of:[%s]>", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go new file mode 100644 index 0000000000..7fd763ecd8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +type List struct { + List []rune + Not bool +} + +func NewList(list []rune, not bool) List { + return List{list, not} +} + +func (self List) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inList := runes.IndexRune(self.List, r) != -1 + return inList == !self.Not +} + +func (self List) Len() int { + return lenOne +} + +func (self List) Index(s string) (int, []int) { + for i, r := range s { + if self.Not == (runes.IndexRune(self.List, r) == -1) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self List) String() string { + var not string + if self.Not { + not = "!" 
+ } + + return fmt.Sprintf("<list:%s[%s]>", not, string(self.List)) +} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go new file mode 100644 index 0000000000..f80e007fb8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/match.go @@ -0,0 +1,81 @@ +package match + +// todo common table of rune's length + +import ( + "fmt" + "strings" +) + +const lenOne = 1 +const lenZero = 0 +const lenNo = -1 + +type Matcher interface { + Match(string) bool + Index(string) (int, []int) + Len() int + String() string +} + +type Matchers []Matcher + +func (m Matchers) String() string { + var s []string + for _, matcher := range m { + s = append(s, fmt.Sprint(matcher)) + } + + return fmt.Sprintf("%s", strings.Join(s, ",")) +} + +// appendMerge merges and sorts given already SORTED and UNIQUE segments. +func appendMerge(target, sub []int) []int { + lt, ls := len(target), len(sub) + out := make([]int, 0, lt+ls) + + for x, y := 0, 0; x < lt || y < ls; { + if x >= lt { + out = append(out, sub[y:]...) + break + } + + if y >= ls { + out = append(out, target[x:]...) + break + } + + xValue := target[x] + yValue := sub[y] + + switch { + + case xValue == yValue: + out = append(out, xValue) + x++ + y++ + + case xValue < yValue: + out = append(out, xValue) + x++ + + case yValue < xValue: + out = append(out, yValue) + y++ + + } + } + + target = append(target[:0], out...) + + return target +} + +func reverseSegments(input []int) { + l := len(input) + m := l / 2 + + for i := 0; i < m; i++ { + input[i], input[l-i-1] = input[l-i-1], input[i] + } +} diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go new file mode 100644 index 0000000000..d72f69efff --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/max.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Max struct { + Limit int +} + +func NewMax(l int) Max { + return Max{l} +} + +func (self Max) Match(s string) bool { + var l int + for range s { + l += 1 + if l > self.Limit { + return false + } + } + + return true +} + +func (self Max) Index(s string) (int, []int) { + segments := acquireSegments(self.Limit + 1) + segments = append(segments, 0) + var count int + for i, r := range s { + count++ + if count > self.Limit { + break + } + segments = append(segments, i+utf8.RuneLen(r)) + } + + return 0, segments +} + +func (self Max) Len() int { + return lenNo +} + +func (self Max) String() string { + return fmt.Sprintf("<max:%d>", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go new file mode 100644 index 0000000000..db57ac8eb4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/min.go @@ -0,0 +1,57 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Min struct { + Limit int +} + +func NewMin(l int) Min { + return Min{l} +} + +func (self Min) Match(s string) bool { + var l int + for range s { + l += 1 + if l >= self.Limit { + return true + } + } + + return false +} + +func (self Min) Index(s string) (int, []int) { + var count int + + c := len(s) - self.Limit + 1 + if c <= 0 { + return -1, nil + } + + segments := acquireSegments(c) + for i, r := range s { + count++ + if count >= self.Limit { + segments = append(segments, i+utf8.RuneLen(r)) + } + } + + if len(segments) == 0 { + return -1, nil + } + + return 0, segments +} + +func (self Min) Len() int { + return lenNo +} + +func (self Min) String() string { + return fmt.Sprintf("<min:%d>", self.Limit) +} diff --git 
a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go new file mode 100644 index 0000000000..0d4ecd36b8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/nothing.go @@ -0,0 +1,27 @@ +package match + +import ( + "fmt" +) + +type Nothing struct{} + +func NewNothing() Nothing { + return Nothing{} +} + +func (self Nothing) Match(s string) bool { + return len(s) == 0 +} + +func (self Nothing) Index(s string) (int, []int) { + return 0, segments0 +} + +func (self Nothing) Len() int { + return lenZero +} + +func (self Nothing) String() string { + return fmt.Sprintf("<nothing>") +} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go new file mode 100644 index 0000000000..a7347250e8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix.go @@ -0,0 +1,50 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type Prefix struct { + Prefix string +} + +func NewPrefix(p string) Prefix { + return Prefix{p} +} + +func (self Prefix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + length := len(self.Prefix) + var sub string + if len(s) > idx+length { + sub = s[idx+length:] + } else { + sub = "" + } + + segments := acquireSegments(len(sub) + 1) + segments = append(segments, length) + for i, r := range sub { + segments = append(segments, length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("<prefix:%s>", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 0000000000..8ee58fe1b3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 0000000000..8208085a19 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go @@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, self.Prefix) + if 
prefixIdx == -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 0000000000..ce30245a40 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!" 
+ } + return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 0000000000..4379042e42 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 0000000000..9ea6f30943 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // making a []int with capacity smaller than cacheFrom + // is faster than acquiring one from the pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // a []int with capacity smaller than cacheFrom was allocated directly, + // so there is no pool to return it to + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 0000000000..ee6e3954c1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" 
+ "unicode/utf8" +) + +// single represents ? +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 0000000000..85bea8c68e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 0000000000..c5106f8196 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 0000000000..3875950bb8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Super) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 0000000000..0a17616d3c --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ 
+package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// Text represents a raw string to match +type Text struct { + Str string + RunesLength int + BytesLength int + Segments []int +} + +func NewText(s string) Text { + return Text{ + Str: s, + RunesLength: utf8.RuneCountInString(s), + BytesLength: len(s), + Segments: []int{len(s)}, + } +} + +func (self Text) Match(s string) bool { + return self.Str == s +} + +func (self Text) Len() int { + return self.RunesLength +} + +func (self Text) Index(s string) (int, []int) { + index := strings.Index(s, self.Str) + if index == -1 { + return -1, nil + } + + return index, self.Segments +} + +func (self Text) String() string { + return fmt.Sprintf("<text:%s>", self.Str) +} diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md new file mode 100644 index 0000000000..f58144e733 --- /dev/null +++ b/vendor/github.com/gobwas/glob/readme.md @@ -0,0 +1,148 @@ +# glob.[go](https://golang.org) + +[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] + +> Go Globbing Library. + +## Install + +```shell + go get github.com/gobwas/glob +``` + +## Example + +```go + +package main + +import "github.com/gobwas/glob" + +func main() { + var g glob.Glob + + // create simple glob + g = glob.MustCompile("*.github.com") + g.Match("api.github.com") // true + + // quote meta characters and then create simple glob + g = glob.MustCompile(glob.QuoteMeta("*.github.com")) + g.Match("*.github.com") // true + + // create new glob with set of delimiters as ["."] + g = glob.MustCompile("api.*.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // false + + // create new glob with set of delimiters as ["."] + // but now with super wildcard + g = glob.MustCompile("api.**.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // true + + // create glob with single symbol wildcard + g = glob.MustCompile("?at") + g.Match("cat") // true + g.Match("fat") // true + g.Match("at") // false + + // create glob with single symbol wildcard and delimiters ['f'] + g = glob.MustCompile("?at", 'f') + g.Match("cat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[abc]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[!abc]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[a-c]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[!a-c]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with pattern-alternatives list + g = glob.MustCompile("{cat,bat,[fr]at}") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // true + g.Match("rat") // true + g.Match("at") // false + g.Match("zat") // false +} + +``` + +## Performance + +This library is designed for compile-once patterns: compilation can take some time, but +matching strings afterwards is much faster than re-parsing the pattern on every call. 
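+For example, a minimal sketch of the intended compile-once usage (the pattern and hostnames here are arbitrary examples): + +```go +package main + +import ( + "fmt" + + "github.com/gobwas/glob" +) + +func main() { + // compile the pattern once, up front + g := glob.MustCompile("api.*.com", '.') + + // reuse the compiled matcher on the hot path + for _, host := range []string{"api.github.com", "api.gi.hub.com"} { + fmt.Println(host, g.Match(host)) // true, then false + } +} +```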
+ +If you do not reuse the compiled `glob.Glob` object and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower. + +Run `go test -bench=.` from the source root to see the benchmarks: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 +`https://*.google.*` | `https://account.google.com` | `true` | 96 +`https://*.google.*` | `https://google.com` | `false` | 66 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 +`abc*` | `abcdef` | `true` | 8.15 +`abc*` | `af` | `false` | 5.68 +`*def` | `abcdef` | `true` | 8.84 +`*def` | `af` | `false` | 5.74 +`ab*ef` | `abcdef` | `true` | 15.2 +`ab*ef` | `af` | `false` | 10.4 + +The same cases with the `regexp` package: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 +`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 +`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 +`^abc.*$` | `abcdef` | `true` | 237 +`^abc.*$` | `af` | `false` | 100 +`^.*def$` | `abcdef` | `true` | 464 +`^.*def$` | `af` | `false` | 265 +`^ab.*ef$` | `abcdef` | `true` | 375 +`^ab.*ef$` | `af` | `false` | 145 + +[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg +[godoc-url]: https://godoc.org/github.com/gobwas/glob +[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master +[travis-url]: https://travis-ci.org/gobwas/glob + +## Syntax + +Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), +except that `**` acts as a super-asterisk that is not sensitive to separators. 
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 0000000000..3220a694a9 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 0000000000..429b409430 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, NewNode(KindSingle, nil)) + return parserMain, tree, nil + + case lexer.RangeOpen: + return parserRange, 
tree, nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of hi character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 0000000000..a1c8d1962a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
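+ // '-' is special only between the two endpoints of a range inside brackets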
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 0000000000..2797c4e83a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 0000000000..1d168b1482 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 0000000000..a723556410 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && i >= ln; 
i-- { + for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 0000000000..e8ee1920b1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md index f5db14f07f..4f73bec82c 100644 --- a/vendor/github.com/gofrs/uuid/README.md +++ b/vendor/github.com/gofrs/uuid/README.md @@ -17,7 +17,7 @@ This package supports the following UUID versions: * Version 5, based on SHA-1 hashing of a named value (RFC-4122) This package also supports experimental Universally Unique Identifier implementations based on a -[draft RFC](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) that updates RFC-4122 +[draft RFC](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html) that updates RFC-4122 * Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122) * Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122) @@ -114,4 +114,4 @@ func main() { * [RFC-4122](https://tools.ietf.org/html/rfc4122) * [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) -* [New UUID 
Formats RFC Draft (Peabody) Rev 03](https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03) +* [New UUID Formats RFC Draft (Peabody) Rev 04](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go index e3014c68c6..665026414c 100644 --- a/vendor/github.com/gofrs/uuid/codec.go +++ b/vendor/github.com/gofrs/uuid/codec.go @@ -22,8 +22,7 @@ package uuid import ( - "bytes" - "encoding/hex" + "errors" "fmt" ) @@ -45,11 +44,77 @@ func FromBytesOrNil(input []byte) UUID { return uuid } +var errInvalidFormat = errors.New("uuid: invalid UUID format") + +func fromHexChar(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 255 +} + +// Parse parses the UUID stored in the string text. Parsing and supported +// formats are the same as UnmarshalText. +func (u *UUID) Parse(s string) error { + switch len(s) { + case 32: // hash + case 36: // canonical + case 34, 38: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", s) + } + s = s[1 : len(s)-1] + case 41, 45: + if s[:9] != "urn:uuid:" { + return fmt.Errorf("uuid: incorrect UUID format in string %q", s[:9]) + } + s = s[9:] + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(s), s) + } + // canonical + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", s) + } + for i, x := range [16]byte{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v1 := fromHexChar(s[x]) + v2 := fromHexChar(s[x+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i] = (v1 << 4) | v2 + } + return nil + } + // hash like + for i := 0; i < 32; i += 2 { + v1 := fromHexChar(s[i]) + v2 := fromHexChar(s[i+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i/2] = (v1 << 4) | v2 + } + return nil +} + // FromString returns a UUID parsed from the input string. // Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (UUID, error) { - u := UUID{} - err := u.UnmarshalText([]byte(input)) +func FromString(text string) (UUID, error) { + var u UUID + err := u.Parse(text) return u, err } @@ -66,133 +131,90 @@ func FromStringOrNil(input string) UUID { // MarshalText implements the encoding.TextMarshaler interface. // The encoding is the same as returned by the String() method. func (u UUID) MarshalText() ([]byte, error) { - return []byte(u.String()), nil + var buf [36]byte + encodeCanonical(buf[:], u) + return buf[:], nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. 
// Following formats are supported: // -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// "{6ba7b8109dad11d180b400c04fd430c8}", -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" // // ABNF for supported UUID text representation follows: // -// URN := 'urn' -// UUID-NID := 'uuid' -// -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// URN := 'urn' +// UUID-NID := 'uuid' // -// hexoct := hexdig hexdig -// 2hexoct := hexoct hexoct -// 4hexoct := 2hexoct 2hexoct -// 6hexoct := 4hexoct 2hexoct -// 12hexoct := 6hexoct 6hexoct +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' // -// hashlike := 12hexoct -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct // -// plain := canonical | hashlike -// uuid := canonical | hashlike | braced | urn +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct // -// braced := '{' plain '}' | '{' hashlike '}' -// urn := URN ':' UUID-NID ':' plain +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn // -func (u *UUID) UnmarshalText(text []byte) error { - switch len(text) { - case 32: - return u.decodeHashLike(text) +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +func (u *UUID) UnmarshalText(b []byte) error { + switch len(b) { + case 32: // hash + case 36: // canonical case 34, 38: - return u.decodeBraced(text) - case 36: - return u.decodeCanonical(text) + if b[0] != '{' || b[len(b)-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", b) + } + b = b[1 : len(b)-1] case 41, 45: - return u.decodeURN(text) + if string(b[:9]) != "urn:uuid:" { + return fmt.Errorf("uuid: incorrect UUID format in string %q", b[:9]) + } + b = b[9:] default: - return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) - } -} - -// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". 
-func (u *UUID) decodeCanonical(t []byte) error { - if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(b), b) } - - src := t - dst := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 { - src = src[1:] // skip dash + if len(b) == 36 { + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", b) } - _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) - if err != nil { - return err + for i, x := range [16]byte{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + v1 := fromHexChar(b[x]) + v2 := fromHexChar(b[x+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i] = (v1 << 4) | v2 } - src = src[byteGroup:] - dst = dst[byteGroup/2:] - } - - return nil -} - -// decodeHashLike decodes UUID strings that are using the following format: -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeHashLike(t []byte) error { - src := t[:] - dst := u[:] - - _, err := hex.Decode(dst, src) - return err -} - -// decodeBraced decodes UUID strings that are using the following formats: -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" -// "{6ba7b8109dad11d180b400c04fd430c8}". -func (u *UUID) decodeBraced(t []byte) error { - l := len(t) - - if t[0] != '{' || t[l-1] != '}' { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + return nil } - - return u.decodePlain(t[1 : l-1]) -} - -// decodeURN decodes UUID strings that are using the following formats: -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodeURN(t []byte) error { - total := len(t) - - urnUUIDPrefix := t[:9] - - if !bytes.Equal(urnUUIDPrefix, urnPrefix) { - return fmt.Errorf("uuid: incorrect UUID format in string %q", t) - } - - return u.decodePlain(t[9:total]) -} - -// decodePlain decodes UUID strings that are using the following formats: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". -func (u *UUID) decodePlain(t []byte) error { - switch len(t) { - case 32: - return u.decodeHashLike(t) - case 36: - return u.decodeCanonical(t) - default: - return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + for i := 0; i < 32; i += 2 { + v1 := fromHexChar(b[i]) + v2 := fromHexChar(b[i+1]) + if v1|v2 == 255 { + return errInvalidFormat + } + u[i/2] = (v1 << 4) | v2 } + return nil } // MarshalBinary implements the encoding.BinaryMarshaler interface. diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go index afaefbc8e3..ccf8d4ca29 100644 --- a/vendor/github.com/gofrs/uuid/fuzz.go +++ b/vendor/github.com/gofrs/uuid/fuzz.go @@ -19,6 +19,7 @@ // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +//go:build gofuzz // +build gofuzz package uuid @@ -27,15 +28,15 @@ package uuid // // To run: // -// $ go get github.com/dvyukov/go-fuzz/... -// $ cd $GOPATH/src/github.com/gofrs/uuid -// $ go-fuzz-build github.com/gofrs/uuid -// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// $ go get github.com/dvyukov/go-fuzz/... 
+// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata // // If you make significant changes to FromString / UnmarshalText and add // new cases to fromStringTests (in codec_test.go), please run // -// $ go test -seed_fuzz_corpus +// $ go test -seed_fuzz_corpus // // to seed the corpus with the new interesting inputs, then run the fuzzer. func Fuzz(data []byte) int { diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go index 4550bc6b34..44be9e1585 100644 --- a/vendor/github.com/gofrs/uuid/generator.go +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -38,7 +38,8 @@ import ( // UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). const epochStart = 122192928000000000 -type epochFunc func() time.Time +// EpochFunc is the function type used to provide the current time. +type EpochFunc func() time.Time // HWAddrFunc is the function type used to provide hardware (MAC) addresses. type HWAddrFunc func() (net.HardwareAddr, error) @@ -80,9 +81,9 @@ func NewV6() (UUID, error) { } // NewV7 returns a k-sortable UUID based on the current millisecond precision -// UNIX epoch and 74 bits of pseudorandom data. +// UNIX epoch and 74 bits of pseudorandom data. It supports single-node batch generation (multiple UUIDs in the same timestamp) with a Monotonic Random counter. // -// This is implemented based on revision 03 of the Peabody UUID draft, and may +// This is implemented based on revision 04 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version @@ -119,13 +120,16 @@ type Gen struct { rand io.Reader - epochFunc epochFunc + epochFunc EpochFunc hwAddrFunc HWAddrFunc lastTime uint64 clockSequence uint16 hardwareAddr [6]byte } +// GenOption is a function type that can be used to configure a Gen generator. +type GenOption func(*Gen) + // interface check -- build will fail if *Gen doesn't satisfy Generator var _ Generator = (*Gen)(nil) @@ -147,18 +151,82 @@ func NewGen() *Gen { // MAC address being used, you'll need to create a new generator using this // function. func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { - return &Gen{ + return NewGenWithOptions(WithHWAddrFunc(hwaf)) +} + +// NewGenWithOptions returns a new instance of Gen with the options provided. +// Most people should use NewGen() or NewGenWithHWAF() instead. +// +// To customize the generator, you can pass in one or more GenOption functions. +// For example: +// +// gen := NewGenWithOptions( +// WithHWAddrFunc(myHWAddrFunc), +// WithEpochFunc(myEpochFunc), +// WithRandomReader(myRandomReader), +// ) +// +// NewGenWithOptions(WithHWAddrFunc(myHWAddrFunc)) is equivalent to calling +// NewGenWithHWAF(myHWAddrFunc) +// NewGenWithOptions() is equivalent to calling NewGen() +func NewGenWithOptions(opts ...GenOption) *Gen { + gen := &Gen{ epochFunc: time.Now, - hwAddrFunc: hwaf, + hwAddrFunc: defaultHWAddrFunc, rand: rand.Reader, } + + for _, opt := range opts { + opt(gen) + } + + return gen +} + +// WithHWAddrFunc is a GenOption that allows you to provide your own HWAddrFunc +// function. +// When this option is nil, the defaultHWAddrFunc is used. 
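+//
+// A minimal usage sketch (the fixed address below is a made-up example value):
+//
+//	gen := uuid.NewGenWithOptions(uuid.WithHWAddrFunc(func() (net.HardwareAddr, error) {
+//		return net.HardwareAddr{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, nil
+//	}))
+//	u, err := gen.NewV1() // the resulting V1 UUID embeds the supplied address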
+func WithHWAddrFunc(hwaf HWAddrFunc) GenOption { + return func(gen *Gen) { + if hwaf == nil { + hwaf = defaultHWAddrFunc + } + + gen.hwAddrFunc = hwaf + } +} + +// WithEpochFunc is a GenOption that allows you to provide your own EpochFunc +// function. +// When this option is nil, time.Now is used. +func WithEpochFunc(epochf EpochFunc) GenOption { + return func(gen *Gen) { + if epochf == nil { + epochf = time.Now + } + + gen.epochFunc = epochf + } +} + +// WithRandomReader is a GenOption that allows you to provide your own random +// reader. +// When this option is nil, the default rand.Reader is used. +func WithRandomReader(reader io.Reader) GenOption { + return func(gen *Gen) { + if reader == nil { + reader = rand.Reader + } + + gen.rand = reader + } } // NewV1 returns a UUID based on the current timestamp and MAC address. func (g *Gen) NewV1() (UUID, error) { u := UUID{} - timeNow, clockSeq, err := g.getClockSequence() + timeNow, clockSeq, err := g.getClockSequence(false) if err != nil { return Nil, err } @@ -225,7 +293,7 @@ func (g *Gen) NewV6() (UUID, error) { return Nil, err } - timeNow, clockSeq, err := g.getClockSequence() + timeNow, clockSeq, err := g.getClockSequence(false) if err != nil { return Nil, err } @@ -241,8 +309,12 @@ func (g *Gen) NewV6() (UUID, error) { return u, nil } -// getClockSequence returns the epoch and clock sequence for V1 and V6 UUIDs. -func (g *Gen) getClockSequence() (uint64, uint16, error) { +// getClockSequence returns the epoch and clock sequence for V1,V6 and V7 UUIDs. +// +// When useUnixTSMs is false, it uses the Coordinated Universal Time (UTC) as a count of 100- +// +// nanosecond intervals since 00:00:00.00, 15 October 1582 (the date of Gregorian reform to the Christian calendar). +func (g *Gen) getClockSequence(useUnixTSMs bool) (uint64, uint16, error) { var err error g.clockSequenceOnce.Do(func() { buf := make([]byte, 2) @@ -258,7 +330,12 @@ func (g *Gen) getClockSequence() (uint64, uint16, error) { g.storageMutex.Lock() defer g.storageMutex.Unlock() - timeNow := g.getEpoch() + var timeNow uint64 + if useUnixTSMs { + timeNow = uint64(g.epochFunc().UnixMilli()) + } else { + timeNow = g.getEpoch() + } // Clock didn't change since last UUID generation. // Should increase clock sequence. if timeNow <= g.lastTime { @@ -272,28 +349,51 @@ func (g *Gen) getClockSequence() (uint64, uint16, error) { // NewV7 returns a k-sortable UUID based on the current millisecond precision // UNIX epoch and 74 bits of pseudorandom data. // -// This is implemented based on revision 03 of the Peabody UUID draft, and may +// This is implemented based on revision 04 of the Peabody UUID draft, and may // be subject to change pending further revisions. Until the final specification // revision is finished, changes required to implement updates to the spec will // not be considered a breaking change. They will happen as a minor version // releases until the spec is final. 
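//
// A rough decoding sketch (v7Millis is a hypothetical helper, not part of this
// package): the first 48 bits of a V7 UUID carry the big-endian Unix timestamp
// in milliseconds, so it can be recovered with
//
//	func v7Millis(u uuid.UUID) uint64 {
//		var ms uint64
//		for _, b := range u[0:6] {
//			ms = ms<<8 | uint64(b) // accumulate the six big-endian timestamp bytes
//		}
//		return ms // milliseconds since the Unix epoch
//	}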
func (g *Gen) NewV7() (UUID, error) { var u UUID - - if _, err := io.ReadFull(g.rand, u[6:]); err != nil { + /* https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#name-uuid-version-7 + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ + + ms, clockSeq, err := g.getClockSequence(true) + if err != nil { return Nil, err } - - tn := g.epochFunc() - ms := uint64(tn.Unix())*1e3 + uint64(tn.Nanosecond())/1e6 - u[0] = byte(ms >> 40) + //UUIDv7 features a 48 bit timestamp. First 32bit (4bytes) represents seconds since 1970, followed by 2 bytes for the ms granularity. + u[0] = byte(ms >> 40) //1-6 bytes: big-endian unsigned number of Unix epoch timestamp u[1] = byte(ms >> 32) u[2] = byte(ms >> 24) u[3] = byte(ms >> 16) u[4] = byte(ms >> 8) u[5] = byte(ms) + //support batching by using a monotonic pseudo-random sequence + //The 6th byte contains the version and partially rand_a data. + //We will lose the most significant bites from the clockSeq (with SetVersion), but it is ok, we need the least significant that contains the counter to ensure the monotonic property + binary.BigEndian.PutUint16(u[6:8], clockSeq) // set rand_a with clock seq which is random and monotonic + + //override first 4bits of u[6]. u.SetVersion(V7) + + //set rand_b 64bits of pseudo-random bits (first 2 will be overridden) + if _, err = io.ReadFull(g.rand, u[8:16]); err != nil { + return Nil, err + } + //override first 2 bits of byte[8] for the variant u.SetVariant(VariantRFC4122) return u, nil diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go index 6f254a4fd1..01d5d88496 100644 --- a/vendor/github.com/gofrs/uuid/sql.go +++ b/vendor/github.com/gofrs/uuid/sql.go @@ -22,12 +22,14 @@ package uuid import ( - "bytes" + "database/sql" "database/sql/driver" - "encoding/json" "fmt" ) +var _ driver.Valuer = UUID{} +var _ sql.Scanner = (*UUID)(nil) + // Value implements the driver.Valuer interface. 
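//
// Round-trip sketch (given an open *sql.DB named db; table and placeholder
// syntax are illustrative):
//
//	_, err := db.Exec("INSERT INTO items (id) VALUES ($1)", u) // stores u.String()
//	var got uuid.UUID
//	err = db.QueryRow("SELECT id FROM items WHERE id = $1", u).Scan(&got) // uses Scan below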
func (u UUID) Value() (driver.Value, error) { return u.String(), nil @@ -49,7 +51,9 @@ func (u *UUID) Scan(src interface{}) error { return u.UnmarshalText(src) case string: - return u.UnmarshalText([]byte(src)) + uu, err := FromString(src) + *u = uu + return err } return fmt.Errorf("uuid: cannot convert %T to UUID", src) @@ -83,27 +87,30 @@ func (u *NullUUID) Scan(src interface{}) error { return u.UUID.Scan(src) } +var nullJSON = []byte("null") + // MarshalJSON marshals the NullUUID as null or the nested UUID func (u NullUUID) MarshalJSON() ([]byte, error) { if !u.Valid { - return json.Marshal(nil) + return nullJSON, nil } - - return json.Marshal(u.UUID) + var buf [38]byte + buf[0] = '"' + encodeCanonical(buf[1:37], u.UUID) + buf[37] = '"' + return buf[:], nil } // UnmarshalJSON unmarshals a NullUUID func (u *NullUUID) UnmarshalJSON(b []byte) error { - if bytes.Equal(b, []byte("null")) { + if string(b) == "null" { u.UUID, u.Valid = Nil, false return nil } - - if err := json.Unmarshal(b, &u.UUID); err != nil { - return err + if n := len(b); n >= 2 && b[0] == '"' { + b = b[1 : n-1] } - - u.Valid = true - - return nil + err := u.UUID.UnmarshalText(b) + u.Valid = (err == nil) + return err } diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go index e747e54125..5320fb5389 100644 --- a/vendor/github.com/gofrs/uuid/uuid.go +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -44,8 +44,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "io" - "strings" "time" ) @@ -133,12 +131,6 @@ func TimestampFromV6(u UUID) (Timestamp, error) { return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil } -// String parse helpers. -var ( - urnPrefix = []byte("urn:uuid:") - byteGroups = []int{8, 4, 4, 4, 12} -) - // Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to // zero. var Nil = UUID{} @@ -182,22 +174,33 @@ func (u UUID) Bytes() []byte { return u[:] } +// encodeCanonical encodes the canonical RFC-4122 form of UUID u into the +// first 36 bytes dst. +func encodeCanonical(dst []byte, u UUID) { + const hextable = "0123456789abcdef" + dst[8] = '-' + dst[13] = '-' + dst[18] = '-' + dst[23] = '-' + for i, x := range [16]byte{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34, + } { + c := u[i] + dst[x] = hextable[c>>4] + dst[x+1] = hextable[c&0x0f] + } +} + // String returns a canonical RFC-4122 string representation of the UUID: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. func (u UUID) String() string { - buf := make([]byte, 36) - - hex.Encode(buf[0:8], u[0:4]) - buf[8] = '-' - hex.Encode(buf[9:13], u[4:6]) - buf[13] = '-' - hex.Encode(buf[14:18], u[6:8]) - buf[18] = '-' - hex.Encode(buf[19:23], u[8:10]) - buf[23] = '-' - hex.Encode(buf[24:], u[10:]) - - return string(buf) + var buf [36]byte + encodeCanonical(buf[:], u) + return string(buf[:]) } // Format implements fmt.Formatter for UUID values. @@ -210,52 +213,41 @@ func (u UUID) String() string { // All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return // "%!verb(uuid.UUID=value)" as recommended by the fmt package. 
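//
// For example (the value is illustrative):
//
//	u := uuid.Must(uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
//	fmt.Sprintf("%s", u) // 6ba7b810-9dad-11d1-80b4-00c04fd430c8
//	fmt.Sprintf("%X", u) // 6BA7B8109DAD11D180B400C04FD430C8
//	fmt.Sprintf("%q", u) // "6ba7b810-9dad-11d1-80b4-00c04fd430c8"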
func (u UUID) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('#') { + fmt.Fprintf(f, "%#v", [Size]byte(u)) + return + } switch c { case 'x', 'X': - s := hex.EncodeToString(u.Bytes()) + b := make([]byte, 32) + hex.Encode(b, u[:]) if c == 'X' { - s = strings.Map(toCapitalHexDigits, s) - } - _, _ = io.WriteString(f, s) - case 'v': - var s string - if f.Flag('#') { - s = fmt.Sprintf("%#v", [Size]byte(u)) - } else { - s = u.String() + toUpperHex(b) } - _, _ = io.WriteString(f, s) - case 's', 'S': - s := u.String() + _, _ = f.Write(b) + case 'v', 's', 'S': + b, _ := u.MarshalText() if c == 'S' { - s = strings.Map(toCapitalHexDigits, s) + toUpperHex(b) } - _, _ = io.WriteString(f, s) + _, _ = f.Write(b) case 'q': - _, _ = io.WriteString(f, `"`+u.String()+`"`) + b := make([]byte, 38) + b[0] = '"' + encodeCanonical(b[1:], u) + b[37] = '"' + _, _ = f.Write(b) default: // invalid/unsupported format verb fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) } } -func toCapitalHexDigits(ch rune) rune { - // convert a-f hex digits to A-F - switch ch { - case 'a': - return 'A' - case 'b': - return 'B' - case 'c': - return 'C' - case 'd': - return 'D' - case 'e': - return 'E' - case 'f': - return 'F' - default: - return ch +func toUpperHex(b []byte) { + for i, c := range b { + if 'a' <= c && c <= 'f' { + b[i] = c - ('a' - 'A') + } } } @@ -283,7 +275,8 @@ func (u *UUID) SetVariant(v byte) { // Must is a helper that wraps a call to a function returning (UUID, error) // and panics if the error is non-nil. It is intended for use in variable // initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +// +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) func Must(u UUID, err error) UUID { if err != nil { panic(err) diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/golang/groupcache/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go deleted file mode 100644 index eac1c7664f..0000000000 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package lru implements an LRU cache. -package lru - -import "container/list" - -// Cache is an LRU cache. It is not safe for concurrent access. 
-type Cache struct { - // MaxEntries is the maximum number of cache entries before - // an item is evicted. Zero means no limit. - MaxEntries int - - // OnEvicted optionally specifies a callback function to be - // executed when an entry is purged from the cache. - OnEvicted func(key Key, value interface{}) - - ll *list.List - cache map[interface{}]*list.Element -} - -// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators -type Key interface{} - -type entry struct { - key Key - value interface{} -} - -// New creates a new Cache. -// If maxEntries is zero, the cache has no limit and it's assumed -// that eviction is done by the caller. -func New(maxEntries int) *Cache { - return &Cache{ - MaxEntries: maxEntries, - ll: list.New(), - cache: make(map[interface{}]*list.Element), - } -} - -// Add adds a value to the cache. -func (c *Cache) Add(key Key, value interface{}) { - if c.cache == nil { - c.cache = make(map[interface{}]*list.Element) - c.ll = list.New() - } - if ee, ok := c.cache[key]; ok { - c.ll.MoveToFront(ee) - ee.Value.(*entry).value = value - return - } - ele := c.ll.PushFront(&entry{key, value}) - c.cache[key] = ele - if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { - c.RemoveOldest() - } -} - -// Get looks up a key's value from the cache. -func (c *Cache) Get(key Key) (value interface{}, ok bool) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.ll.MoveToFront(ele) - return ele.Value.(*entry).value, true - } - return -} - -// Remove removes the provided key from the cache. -func (c *Cache) Remove(key Key) { - if c.cache == nil { - return - } - if ele, hit := c.cache[key]; hit { - c.removeElement(ele) - } -} - -// RemoveOldest removes the oldest item from the cache. -func (c *Cache) RemoveOldest() { - if c.cache == nil { - return - } - ele := c.ll.Back() - if ele != nil { - c.removeElement(ele) - } -} - -func (c *Cache) removeElement(e *list.Element) { - c.ll.Remove(e) - kv := e.Value.(*entry) - delete(c.cache, kv.key) - if c.OnEvicted != nil { - c.OnEvicted(kv.key, kv.value) - } -} - -// Len returns the number of items in the cache. -func (c *Cache) Len() int { - if c.cache == nil { - return 0 - } - return c.ll.Len() -} - -// Clear purges all stored items from the cache. -func (c *Cache) Clear() { - if c.OnEvicted != nil { - for _, e := range c.cache { - kv := e.Value.(*entry) - c.OnEvicted(kv.key, kv.value) - } - } - c.ll = nil - c.cache = nil -} diff --git a/vendor/github.com/google/go-containerregistry/internal/compression/compression.go b/vendor/github.com/google/go-containerregistry/internal/compression/compression.go new file mode 100644 index 0000000000..012487150f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/compression/compression.go @@ -0,0 +1,97 @@ +// Copyright 2022 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compression abstracts over gzip and zstd. 
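+//
+// A hedged usage sketch (the file name is illustrative):
+//
+//	opener := func() (io.ReadCloser, error) { return os.Open("layer.tar.gz") }
+//	c, err := compression.GetCompression(opener) // compression.GZip for gzip input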
+package compression + +import ( + "bufio" + "bytes" + "io" + + "github.com/google/go-containerregistry/internal/gzip" + "github.com/google/go-containerregistry/internal/zstd" + "github.com/google/go-containerregistry/pkg/compression" +) + +// Opener represents e.g. opening a file. +type Opener = func() (io.ReadCloser, error) + +// GetCompression detects whether an Opener is compressed and which algorithm is used. +func GetCompression(opener Opener) (compression.Compression, error) { + rc, err := opener() + if err != nil { + return compression.None, err + } + defer rc.Close() + + cp, _, err := PeekCompression(rc) + if err != nil { + return compression.None, err + } + + return cp, nil +} + +// PeekCompression detects whether the input stream is compressed and which algorithm is used. +// +// If r implements Peek, we will use that directly, otherwise a small number +// of bytes are buffered to Peek at the gzip/zstd header, and the returned +// PeekReader can be used as a replacement for the consumed input io.Reader. +func PeekCompression(r io.Reader) (compression.Compression, PeekReader, error) { + pr := intoPeekReader(r) + + if isGZip, _, err := checkHeader(pr, gzip.MagicHeader); err != nil { + return compression.None, pr, err + } else if isGZip { + return compression.GZip, pr, nil + } + + if isZStd, _, err := checkHeader(pr, zstd.MagicHeader); err != nil { + return compression.None, pr, err + } else if isZStd { + return compression.ZStd, pr, nil + } + + return compression.None, pr, nil +} + +// PeekReader is an io.Reader that also implements Peek a la bufio.Reader. +type PeekReader interface { + io.Reader + Peek(n int) ([]byte, error) +} + +// IntoPeekReader creates a PeekReader from an io.Reader. +// If the reader already has a Peek method, it will just return the passed reader. +func intoPeekReader(r io.Reader) PeekReader { + if p, ok := r.(PeekReader); ok { + return p + } + + return bufio.NewReader(r) +} + +// CheckHeader checks whether the first bytes from a PeekReader match an expected header +func checkHeader(pr PeekReader, expectedHeader []byte) (bool, PeekReader, error) { + header, err := pr.Peek(len(expectedHeader)) + if err != nil { + // https://github.com/google/go-containerregistry/issues/367 + if err == io.EOF { + return false, pr, nil + } + return false, pr, err + } + return bytes.Equal(header, expectedHeader), pr, nil +} diff --git a/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go b/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go index 1a52694e8d..018c0f8c0f 100644 --- a/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go +++ b/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go @@ -24,7 +24,8 @@ import ( "github.com/google/go-containerregistry/internal/and" ) -var gzipMagicHeader = []byte{'\x1f', '\x8b'} +// MagicHeader is the start of gzip files. +var MagicHeader = []byte{'\x1f', '\x8b'} // ReadCloser reads uncompressed input data from the io.ReadCloser and // returns an io.ReadCloser from which compressed data may be read. @@ -84,7 +85,7 @@ func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser { } // UnzipReadCloser reads compressed input data from the io.ReadCloser and -// returns an io.ReadCloser from which uncompessed data may be read. +// returns an io.ReadCloser from which uncompressed data may be read. 
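+//
+// Round-trip sketch using this internal package's helpers:
+//
+//	zr := gzip.ReadCloser(io.NopCloser(strings.NewReader("hello"))) // compress
+//	r, err := gzip.UnzipReadCloser(zr)                              // decompress
+//	b, _ := io.ReadAll(r)                                           // "hello" again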
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { gr, err := gzip.NewReader(r) if err != nil { @@ -113,34 +114,5 @@ func Is(r io.Reader) (bool, error) { if err != nil { return false, err } - return bytes.Equal(magicHeader, gzipMagicHeader), nil -} - -// PeekReader is an io.Reader that also implements Peek a la bufio.Reader. -type PeekReader interface { - io.Reader - Peek(n int) ([]byte, error) -} - -// Peek detects whether the input stream is gzip compressed. -// -// If r implements Peek, we will use that directly, otherwise a small number -// of bytes are buffered to Peek at the gzip header, and the returned -// PeekReader can be used as a replacement for the consumed input io.Reader. -func Peek(r io.Reader) (bool, PeekReader, error) { - var pr PeekReader - if p, ok := r.(PeekReader); ok { - pr = p - } else { - pr = bufio.NewReader(r) - } - header, err := pr.Peek(2) - if err != nil { - // https://github.com/google/go-containerregistry/issues/367 - if err == io.EOF { - return false, pr, nil - } - return false, pr, err - } - return bytes.Equal(header, gzipMagicHeader), pr, nil + return bytes.Equal(magicHeader, MagicHeader), nil } diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/retry.go b/vendor/github.com/google/go-containerregistry/internal/retry/retry.go index ad6dcbb753..c9e3564504 100644 --- a/vendor/github.com/google/go-containerregistry/internal/retry/retry.go +++ b/vendor/github.com/google/go-containerregistry/internal/retry/retry.go @@ -76,3 +76,19 @@ func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) { wait.ExponentialBackoff(backoff, condition) return } + +type contextKey string + +var key = contextKey("never") + +// Never returns a context that signals something should not be retried. +// This is a hack and can be used to communicate across package boundaries +// to avoid retry amplification. +func Never(ctx context.Context) context.Context { + return context.WithValue(ctx, key, true) +} + +// Ever returns true if the context was not wrapped by Never. +func Ever(ctx context.Context) bool { + return ctx.Value(key) == nil +} diff --git a/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go b/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go new file mode 100644 index 0000000000..cccf54a308 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go @@ -0,0 +1,116 @@ +// Copyright 2022 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd provides helper functions for interacting with zstd streams. +package zstd + +import ( + "bufio" + "bytes" + "io" + + "github.com/google/go-containerregistry/internal/and" + "github.com/klauspost/compress/zstd" +) + +// MagicHeader is the start of zstd files. +var MagicHeader = []byte{'\x28', '\xb5', '\x2f', '\xfd'} + +// ReadCloser reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. 
+// This uses zstd level 1 for the compression. +func ReadCloser(r io.ReadCloser) io.ReadCloser { + return ReadCloserLevel(r, 1) +} + +// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser { + pr, pw := io.Pipe() + + // For highly compressible layers, zstd.Writer will output a very small + // number of bytes per Write(). This is normally fine, but when pushing + // to a registry, we want to ensure that we're taking full advantage of + // the available bandwidth instead of sending tons of tiny writes over + // the wire. + // 64K ought to be small enough for anybody. + bw := bufio.NewWriterSize(pw, 2<<16) + + // Returns err so we can pw.CloseWithError(err) + go func() error { + // TODO(go1.14): Just defer {pw,zw,r}.Close like you'd expect. + // Context: https://golang.org/issue/24283 + zw, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level))) + if err != nil { + return pw.CloseWithError(err) + } + + if _, err := io.Copy(zw, r); err != nil { + defer r.Close() + defer zw.Close() + return pw.CloseWithError(err) + } + + // Close zstd writer to Flush it and write zstd trailers. + if err := zw.Close(); err != nil { + return pw.CloseWithError(err) + } + + // Flush bufio writer to ensure we write out everything. + if err := bw.Flush(); err != nil { + return pw.CloseWithError(err) + } + + // We don't really care if these fail. + defer pw.Close() + defer r.Close() + + return nil + }() + + return pr +} + +// UnzipReadCloser reads compressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which uncompressed data may be read. +func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { + gr, err := zstd.NewReader(r) + if err != nil { + return nil, err + } + return &and.ReadCloser{ + Reader: gr, + CloseFunc: func() error { + // If the unzip fails, then this seems to return the same + // error as the read. We don't want this to interfere with + // us closing the main ReadCloser, since this could leave + // an open file descriptor (fails on Windows). + gr.Close() + return r.Close() + }, + }, nil +} + +// Is detects whether the input stream is compressed. +func Is(r io.Reader) (bool, error) { + magicHeader := make([]byte, 4) + n, err := r.Read(magicHeader) + if n == 0 && err == io.EOF { + return false, nil + } + if err != nil { + return false, err + } + return bytes.Equal(magicHeader, MagicHeader), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go index fc605d7511..a4a88b3d55 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -123,6 +123,10 @@ func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) { if err != nil { return nil, err } + // cf.GetAuthConfig automatically sets the ServerAddress attribute. Since + // we don't make use of it, clear the value for a proper "is-empty" test. 
+ // See: https://github.com/google/go-containerregistry/issues/1510 + cfg.ServerAddress = "" if cfg != empty { break } diff --git a/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go b/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go new file mode 100644 index 0000000000..6686c2d8d9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go @@ -0,0 +1,26 @@ +// Copyright 2022 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compression abstracts over gzip and zstd. +package compression + +// Compression is an enumeration of the supported compression algorithms +type Compression string + +// The collection of known MediaType values. +const ( + None Compression = "none" + GZip Compression = "gzip" + ZStd Compression = "zstd" +) diff --git a/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go index 5d25d63d61..a5d25b1887 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go +++ b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go @@ -16,24 +16,24 @@ package logs import ( - "io/ioutil" + "io" "log" ) var ( // Warn is used to log non-fatal errors. - Warn = log.New(ioutil.Discard, "", log.LstdFlags) + Warn = log.New(io.Discard, "", log.LstdFlags) // Progress is used to log notable, successful events. - Progress = log.New(ioutil.Discard, "", log.LstdFlags) + Progress = log.New(io.Discard, "", log.LstdFlags) // Debug is used to log information that is useful for debugging. - Debug = log.New(ioutil.Discard, "", log.LstdFlags) + Debug = log.New(io.Discard, "", log.LstdFlags) ) // Enabled checks to see if the logger's writer is set to something other -// than ioutil.Discard. This allows callers to avoid expensive operations +// than io.Discard. This allows callers to avoid expensive operations // that will end up in /dev/null anyway. func Enabled(l *log.Logger) bool { - return l.Writer() != ioutil.Discard + return l.Writer() != io.Discard } diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go index 35a25847f6..bf004ffcfb 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go @@ -35,7 +35,7 @@ func (e *ErrBadName) Is(target error) bool { } // newErrBadName returns a ErrBadName which returns the given formatted string from Error(). 
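// (The parameter list below moves from interface{} to the 'any' alias; the two
// are identical as of Go 1.18, so the change is purely cosmetic.)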
-func newErrBadName(fmtStr string, args ...interface{}) *ErrBadName { +func newErrBadName(fmtStr string, args ...any) *ErrBadName { return &ErrBadName{fmt.Sprintf(fmtStr, args...)} } diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go index 955c59a7bb..912ab33018 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -56,16 +56,16 @@ type stringConst string // To discourage its use in scenarios where the value is not known at code // authoring time, it must be passed a string constant: // -// const str = "valid/string" -// MustParseReference(str) -// MustParseReference("another/valid/string") -// MustParseReference(str + "/and/more") +// const str = "valid/string" +// MustParseReference(str) +// MustParseReference("another/valid/string") +// MustParseReference(str + "/and/more") // // These will not compile: // -// var str = "valid/string" -// MustParseReference(str) -// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) +// var str = "valid/string" +// MustParseReference(str) +// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) func MustParseReference(s stringConst, opts ...Option) Reference { ref, err := ParseReference(string(s), opts...) if err != nil { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index 40b1607789..08ec0158f6 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -90,8 +90,10 @@ type HealthConfig struct { } // Config is a submessage of the config file described as: -// The execution parameters which SHOULD be used as a base when running -// a container using the image. +// +// The execution parameters which SHOULD be used as a base when running +// a container using the image. 
+// // The names of the fields in this message are chosen to reflect the JSON // payload of the Config as defined here: // https://git.io/vrAET diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go index 0f886667ad..98b1ff9094 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go @@ -25,7 +25,9 @@ import ( type Matcher func(desc v1.Descriptor) bool // Name returns a match.Matcher that matches based on the value of the -// "org.opencontainers.image.ref.name" annotation: +// +// "org.opencontainers.image.ref.name" annotation: +// // github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys func Name(name string) Matcher { return Annotation(imagespec.AnnotationRefName, name) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go index c999517289..44989ac968 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go @@ -18,7 +18,10 @@ import ( "io" "github.com/google/go-containerregistry/internal/and" + "github.com/google/go-containerregistry/internal/compression" "github.com/google/go-containerregistry/internal/gzip" + "github.com/google/go-containerregistry/internal/zstd" + comp "github.com/google/go-containerregistry/pkg/compression" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" ) @@ -51,23 +54,27 @@ func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { return nil, err } - // Often, the "compressed" bytes are not actually gzip-compressed. - // Peek at the first two bytes to determine whether or not it's correct to - // wrap this with gzip.UnzipReadCloser. - gzipped, pr, err := gzip.Peek(rc) + // Often, the "compressed" bytes are not actually-compressed. + // Peek at the first two bytes to determine whether it's correct to + // wrap this with gzip.UnzipReadCloser or zstd.UnzipReadCloser. 
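+	// (PeekCompression buffers just enough bytes to inspect the magic header and
+	// returns a PeekReader that replays them, so no layer bytes are lost.)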
+ cp, pr, err := compression.PeekCompression(rc) if err != nil { return nil, err } + prc := &and.ReadCloser{ Reader: pr, CloseFunc: rc.Close, } - if !gzipped { + switch cp { + case comp.GZip: + return gzip.UnzipReadCloser(prc) + case comp.ZStd: + return zstd.UnzipReadCloser(prc) + default: return prc, nil } - - return gzip.UnzipReadCloser(prc) } // DiffID implements v1.Layer diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go index b64e9881ee..3ad4992892 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -19,7 +19,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" @@ -67,12 +66,12 @@ func (cl *configLayer) DiffID() (v1.Hash, error) { // Uncompressed implements v1.Layer func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil + return io.NopCloser(bytes.NewBuffer(cl.content)), nil } // Compressed implements v1.Layer func (cl *configLayer) Compressed() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil + return io.NopCloser(bytes.NewBuffer(cl.content)), nil } // Size implements v1.Layer @@ -355,7 +354,7 @@ func UncompressedSize(l v1.Layer) (int64, error) { } defer rc.Close() - return io.Copy(ioutil.Discard, rc) + return io.Copy(io.Discard, rc) } type withExists interface { @@ -385,7 +384,7 @@ func Exists(l v1.Layer) (bool, error) { // Recursively unwrap our wrappers so that we can check for the original implementation. // We might want to expose this? -func unwrap(i interface{}) interface{} { +func unwrap(i any) any { if ule, ok := i.(*uncompressedLayerExtender); ok { return unwrap(ule.UncompressedLayer) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go index 8a0a6ca7b0..b4395c2391 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go @@ -1,3 +1,17 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package remote import ( @@ -35,11 +49,10 @@ func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTrip // authorize a push. Figure out how to return early here when we can, // to avoid a roundtrip for spec-compliant registries. 
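	// (The writer no longer stores its own context; one is now passed explicitly
	// per call, e.g. context.Background() to initiateUpload below.)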
w := writer{ - repo: ref.Context(), - client: &http.Client{Transport: tr}, - context: context.Background(), + repo: ref.Context(), + client: &http.Client{Transport: tr}, } - loc, _, err := w.initiateUpload("", "", "") + loc, _, err := w.initiateUpload(context.Background(), "", "", "") if loc != "" { // Since we're only initiating the upload to check whether we // can, we should attempt to cancel it, in case initiating diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go index cbe1268e02..9c8ae168fa 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -19,7 +19,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -260,7 +259,7 @@ func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType return nil, nil, err } - manifest, err := ioutil.ReadAll(resp.Body) + manifest, err := io.ReadAll(resp.Body) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go index a36416d8c3..4e17de76e8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -17,7 +17,6 @@ package remote import ( "bytes" "io" - "io/ioutil" "net/http" "net/url" "sync" @@ -115,7 +114,7 @@ func (r *remoteImage) RawConfigFile() ([]byte, error) { } defer body.Close() - r.config, err = ioutil.ReadAll(body) + r.config, err = io.ReadAll(body) if err != nil { return nil, err } @@ -153,7 +152,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { } if d.Data != nil { - return verify.ReadCloser(ioutil.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest) + return verify.ReadCloser(io.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest) } // We don't want to log binary layers -- this can break terminals. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go index 9898579188..a4ee74c057 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -194,10 +194,12 @@ func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { // This naively matches the first manifest with matching platform attributes. 
// // We should probably use this instead: -// github.com/containerd/containerd/platforms +// +// github.com/containerd/containerd/platforms // // But first we'd need to migrate to: -// github.com/opencontainers/image-spec/specs-go/v1 +// +// github.com/opencontainers/image-spec/specs-go/v1 func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) { index, err := r.IndexManifest() if err != nil { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go index 002ef8587b..7f32413cee 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go @@ -89,7 +89,6 @@ func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) { w := writer{ repo: repo, client: &http.Client{Transport: tr}, - context: o.context, backoff: o.retryBackoff, predicate: o.retryPredicate, } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go index 70882435f1..91b9005c99 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -74,6 +74,22 @@ var defaultRetryBackoff = Backoff{ Steps: 3, } +// Useful for tests +var fastBackoff = Backoff{ + Duration: 1.0 * time.Millisecond, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +var retryableStatusCodes = []int{ + http.StatusRequestTimeout, + http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout, +} + const ( defaultJobs = 4 @@ -87,10 +103,7 @@ const ( var DefaultTransport http.RoundTripper = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ - // By default we wrap the transport in retries, so reduce the - // default dial timeout to 5s to avoid 5x 30s of connection - // timeouts when doing the "ping" on certain http registries. - Timeout: 5 * time.Second, + Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, ForceAttemptHTTP2: true, @@ -143,7 +156,7 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) { } // Wrap the transport in something that can retry network flakes. - o.transport = transport.NewRetry(o.transport) + o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(retryableStatusCodes...)) // Wrap this last to prevent transport.New from double-wrapping. 
if o.userAgent != "" { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index 2131ee487c..ea07ff6abb 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -19,7 +19,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/url" @@ -268,11 +268,13 @@ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { defer resp.Body.Close() if err := CheckError(resp, http.StatusOK); err != nil { - logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + if bt.basic == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + } return nil, err } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } // https://docs.docker.com/registry/spec/auth/token/ @@ -308,9 +310,11 @@ func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { defer resp.Body.Close() if err := CheckError(resp, http.StatusOK); err != nil { - logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + if bt.basic == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + } return nil, err } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index f059f77b6d..c0e4337300 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -17,7 +17,7 @@ package transport import ( "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "strings" @@ -86,9 +86,9 @@ func (e *Error) Temporary() bool { // Diagnostic represents a single error returned by a Docker registry interaction. type Diagnostic struct { - Code ErrorCode `json:"code"` - Message string `json:"message,omitempty"` - Detail interface{} `json:"detail,omitempty"` + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail any `json:"detail,omitempty"` } // String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] @@ -153,7 +153,7 @@ func CheckError(resp *http.Response, codes ...int) error { return nil } } - b, err := ioutil.ReadAll(resp.Body) + b, err := io.ReadAll(resp.Body) if err != nil { return err } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go index 29c36afe7c..d852ef8455 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -19,11 +19,12 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "strings" + "time" authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/pkg/logs" "github.com/google/go-containerregistry/pkg/name" ) @@ -35,6 +36,9 @@ const ( bearer challenge = "bearer" ) +// 300ms is the default fallback period for go's DNS dialer but we could make this configurable. 
+var fallbackDelay = 300 * time.Millisecond + type pingResp struct { challenge challenge @@ -50,27 +54,7 @@ func (c challenge) Canonical() challenge { return challenge(strings.ToLower(string(c))) } -func parseChallenge(suffix string) map[string]string { - kv := make(map[string]string) - for _, token := range strings.Split(suffix, ",") { - // Trim any whitespace around each token. - token = strings.Trim(token, " ") - - // Break the token into a key/value pair - if parts := strings.SplitN(token, "=", 2); len(parts) == 2 { - // Unquote the value, if it is quoted. - kv[parts[0]] = strings.Trim(parts[1], `"`) - } else { - // If there was only one part, treat is as a key with an empty value - kv[token] = "" - } - } - return kv -} - func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) { - client := http.Client{Transport: t} - // This first attempts to use "https" for every request, falling back to http // if the registry matches our localhost heuristic or if it is intentionally // set to insecure via name.NewInsecureRegistry. @@ -78,54 +62,117 @@ func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingRes if reg.Scheme() == "http" { schemes = append(schemes, "http") } + if len(schemes) == 1 { + return pingSingle(ctx, reg, t, schemes[0]) + } + return pingParallel(ctx, reg, t, schemes) +} - var errs []error - for _, scheme := range schemes { - url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - errs = append(errs, err) - // Potentially retry with http. - continue - } - defer func() { - // By draining the body, make sure to reuse the connection made by - // the ping for the following access to the registry - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - - switch resp.StatusCode { - case http.StatusOK: - // If we get a 200, then no authentication is needed. +func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*pingResp, error) { + client := http.Client{Transport: t} + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer func() { + // By draining the body, make sure to reuse the connection made by + // the ping for the following access to the registry + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &pingResp{ + challenge: anonymous, + scheme: scheme, + }, nil + case http.StatusUnauthorized: + if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { + // If we hit more than one, let's try to find one that we know how to handle. + wac := pickFromMultipleChallenges(challenges) return &pingResp{ - challenge: anonymous, - scheme: scheme, + challenge: challenge(wac.Scheme).Canonical(), + parameters: wac.Parameters, + scheme: scheme, }, nil - case http.StatusUnauthorized: - if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { - // If we hit more than one, let's try to find one that we know how to handle. 
- wac := pickFromMultipleChallenges(challenges) - return &pingResp{ - challenge: challenge(wac.Scheme).Canonical(), - parameters: wac.Parameters, - scheme: scheme, - }, nil + } + // Otherwise, just return the challenge without parameters. + return &pingResp{ + challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(), + scheme: scheme, + }, nil + default: + return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) + } +} + +// Based on the golang happy eyeballs dialParallel impl in net/dial.go. +func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*pingResp, error) { + returned := make(chan struct{}) + defer close(returned) + + type pingResult struct { + *pingResp + error + primary bool + done bool + } + + results := make(chan pingResult) + + startRacer := func(ctx context.Context, scheme string) { + pr, err := pingSingle(ctx, reg, t, scheme) + select { + case results <- pingResult{pingResp: pr, error: err, primary: scheme == "https", done: true}: + case <-returned: + if pr != nil { + logs.Debug.Printf("%s lost race", scheme) + } + } + } + + var primary, fallback pingResult + + primaryCtx, primaryCancel := context.WithCancel(ctx) + defer primaryCancel() + go startRacer(primaryCtx, schemes[0]) + + fallbackTimer := time.NewTimer(fallbackDelay) + defer fallbackTimer.Stop() + + for { + select { + case <-fallbackTimer.C: + fallbackCtx, fallbackCancel := context.WithCancel(ctx) + defer fallbackCancel() + go startRacer(fallbackCtx, schemes[1]) + + case res := <-results: + if res.error == nil { + return res.pingResp, nil + } + if res.primary { + primary = res + } else { + fallback = res + } + if primary.done && fallback.done { + return nil, multierrs([]error{primary.error, fallback.error}) + } + if res.primary && fallbackTimer.Stop() { + // Primary failed and we haven't started the fallback, + // reset time to start fallback immediately. + fallbackTimer.Reset(0) } - // Otherwise, just return the challenge without parameters. - return &pingResp{ - challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(), - scheme: scheme, - }, nil - default: - return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) } } - return nil, multierrs(errs) } func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge { @@ -161,7 +208,7 @@ func (m multierrs) Error() string { return b.String() } -func (m multierrs) As(target interface{}) bool { +func (m multierrs) As(target any) bool { for _, err := range m { if errors.As(err, target) { return true diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go index 0a45dc75b9..e5621e3053 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go @@ -21,12 +21,12 @@ import ( "github.com/google/go-containerregistry/internal/retry" ) -// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips. +// Sleep for 0.1 then 0.3 seconds. This should cover networking blips. 
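The rewritten ping above races an https attempt against an http fallback that only starts after `fallbackDelay`, modeled, as its comment notes, on the happy-eyeballs `dialParallel` in Go's net package. Reduced to the select loop alone, and with illustrative names rather than the package internals, the pattern looks like this sketch:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// race tries the primary scheme first and starts the fallback only after a
// delay, returning as soon as either attempt succeeds.
func race(ctx context.Context, try func(ctx context.Context, scheme string) error) error {
	results := make(chan error, 2) // buffered so the losing goroutine never blocks

	go func() { results <- try(ctx, "https") }()

	fallback := time.NewTimer(300 * time.Millisecond)
	defer fallback.Stop()

	var errs []error
	for {
		select {
		case <-fallback.C:
			// Primary is slow: start the fallback attempt.
			go func() { results <- try(ctx, "http") }()
		case err := <-results:
			if err == nil {
				return nil
			}
			errs = append(errs, err)
			if len(errs) == 2 {
				return errors.Join(errs...)
			}
			// Primary failed before the timer fired: start the fallback
			// immediately instead of waiting out the delay.
			if fallback.Stop() {
				fallback.Reset(0)
			}
		}
	}
}

func main() {
	err := race(context.Background(), func(ctx context.Context, scheme string) error {
		if scheme == "https" {
			return fmt.Errorf("%s: connection refused", scheme)
		}
		return nil // pretend plain http succeeds
	})
	fmt.Println("result:", err) // result: <nil>
}
```

The buffered channel is what lets the losing goroutine exit instead of leaking; in the real code that job is done by the `returned` channel.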
var defaultBackoff = retry.Backoff{ Duration: 100 * time.Millisecond, Factor: 3.0, Jitter: 0.1, - Steps: 5, + Steps: 3, } var _ http.RoundTripper = (*retryTransport)(nil) @@ -36,6 +36,7 @@ type retryTransport struct { inner http.RoundTripper backoff retry.Backoff predicate retry.Predicate + codes []int } // Option is a functional option for retryTransport. @@ -44,6 +45,7 @@ type Option func(*options) type options struct { backoff retry.Backoff predicate retry.Predicate + codes []int } // Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib @@ -63,6 +65,13 @@ func WithRetryPredicate(predicate func(error) bool) Option { } } +// WithRetryStatusCodes sets which http response codes will be retried. +func WithRetryStatusCodes(codes ...int) Option { + return func(o *options) { + o.codes = codes + } +} + // NewRetry returns a transport that retries errors. func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { o := &options{ @@ -78,12 +87,23 @@ func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { inner: inner, backoff: o.backoff, predicate: o.predicate, + codes: o.codes, } } func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { roundtrip := func() error { out, err = t.inner.RoundTrip(in) + if !retry.Ever(in.Context()) { + return nil + } + if out != nil { + for _, code := range t.codes { + if out.StatusCode == code { + return CheckError(out) + } + } + } return err } retry.Retry(roundtrip, t.predicate, t.backoff) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index 137349bf21..99e9fe837b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -75,7 +75,6 @@ func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *option w := writer{ repo: ref.Context(), client: &http.Client{Transport: tr}, - context: ctx, progress: progress, backoff: o.retryBackoff, predicate: o.retryPredicate, @@ -169,9 +168,8 @@ func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *option // writer writes the elements of an image to a remote image reference. type writer struct { - repo name.Repository - client *http.Client - context context.Context + repo name.Repository + client *http.Client progress *progress backoff Backoff @@ -207,7 +205,7 @@ func (w *writer) nextLocation(resp *http.Response) (string, error) { // HEAD request to the blob store API. GCR performs an existence check on the // initiation if "mount" is specified, even if no "from" sources are specified. // However, this is not broadly applicable to all registries, e.g. ECR. -func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { +func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) req, err := http.NewRequest(http.MethodHead, u.String(), nil) @@ -215,7 +213,7 @@ func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { return false, err } - resp, err := w.client.Do(req.WithContext(w.context)) + resp, err := w.client.Do(req.WithContext(ctx)) if err != nil { return false, err } @@ -230,7 +228,7 @@ func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { // checkExistingManifest checks if a manifest exists already in the repository // by making a HEAD request to the manifest API. 
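With `WithRetryStatusCodes` above, a response whose status is in the configured list is converted into an error via `CheckError`, so the surrounding retry loop treats it like a network failure. A self-contained sketch of that transport shape, assuming plain exponential backoff and bodiless requests (illustrative code, not go-containerregistry's internal `retry` package):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"time"
)

// retryTransport retries the wrapped RoundTripper when the response status is
// in codes. Only safe for requests without a body (pings, HEAD probes), since
// a consumed body cannot be blindly replayed.
type retryTransport struct {
	inner http.RoundTripper
	codes map[int]bool
	steps int
	delay time.Duration
}

func (t *retryTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	delay := t.delay
	for attempt := 1; ; attempt++ {
		resp, err := t.inner.RoundTrip(req)
		if err == nil && !t.codes[resp.StatusCode] {
			return resp, nil // success, or a status we never retry
		}
		if attempt == t.steps {
			return resp, err // out of attempts: surface the last result
		}
		if resp != nil {
			// Drain and close so the underlying connection can be reused.
			io.Copy(io.Discard, resp.Body)
			resp.Body.Close()
		}
		time.Sleep(delay)
		delay *= 3
	}
}

func main() {
	n := 0
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		n++
		if n < 3 {
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()

	client := &http.Client{Transport: &retryTransport{
		inner: http.DefaultTransport,
		codes: map[int]bool{http.StatusServiceUnavailable: true},
		steps: 5,
		delay: 10 * time.Millisecond,
	}}
	resp, err := client.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode, "after", n, "attempts") // 200 after 3 attempts
}
```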
-func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) { +func (w *writer) checkExistingManifest(ctx context.Context, h v1.Hash, mt types.MediaType) (bool, error) { u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String())) req, err := http.NewRequest(http.MethodHead, u.String(), nil) @@ -239,7 +237,7 @@ func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, err } req.Header.Set("Accept", string(mt)) - resp, err := w.client.Do(req.WithContext(w.context)) + resp, err := w.client.Do(req.WithContext(ctx)) if err != nil { return false, err } @@ -258,7 +256,7 @@ func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, err // On success, the layer was either mounted (nothing more to do) or a blob // upload was initiated and the body of that blob should be sent to the returned // location. -func (w *writer) initiateUpload(from, mount, origin string) (location string, mounted bool, err error) { +func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) (location string, mounted bool, err error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) uv := url.Values{} if mount != "" && from != "" { @@ -277,7 +275,7 @@ func (w *writer) initiateUpload(from, mount, origin string) (location string, mo return "", false, err } req.Header.Set("Content-Type", "application/json") - resp, err := w.client.Do(req.WithContext(w.context)) + resp, err := w.client.Do(req.WithContext(ctx)) if err != nil { return "", false, err } @@ -287,7 +285,7 @@ func (w *writer) initiateUpload(from, mount, origin string) (location string, mo if origin != "" && origin != w.repo.RegistryStr() { // https://github.com/google/go-containerregistry/issues/1404 logs.Warn.Printf("retrying without mount: %v", err) - return w.initiateUpload("", "", "") + return w.initiateUpload(ctx, "", "", "") } return "", false, err } @@ -364,7 +362,7 @@ func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation // commitBlob commits this blob by sending a PUT to the location returned from // streaming the blob. -func (w *writer) commitBlob(location, digest string) error { +func (w *writer) commitBlob(ctx context.Context, location, digest string) error { u, err := url.Parse(location) if err != nil { return err @@ -379,7 +377,7 @@ func (w *writer) commitBlob(location, digest string) error { } req.Header.Set("Content-Type", "application/octet-stream") - resp, err := w.client.Do(req.WithContext(w.context)) + resp, err := w.client.Do(req.WithContext(ctx)) if err != nil { return err } @@ -399,11 +397,12 @@ func (w *writer) incrProgress(written int64) { // uploadOne performs a complete upload of a single layer. func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { tryUpload := func() error { + ctx := retry.Never(ctx) var from, mount, origin string if h, err := l.Digest(); err == nil { // If we know the digest, this isn't a streaming layer. Do an existence // check so we can skip uploading the layer if possible. 
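A recurring change in write.go above is removing the stored `context` field from `writer` and threading a `ctx` argument through every request helper instead, in line with the usual Go guidance that a context is a per-call value rather than a struct field. In miniature, with illustrative types:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// Discouraged: the context is captured at construction time, so every request
// issued by the writer shares one lifetime regardless of the caller.
type writerBefore struct {
	client *http.Client
	ctx    context.Context
}

// Preferred, as in the diff above: each method takes the caller's context.
type writerAfter struct {
	client *http.Client
}

func (w *writerAfter) head(ctx context.Context, url string) (int, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return 0, err
	}
	resp, err := w.client.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	return resp.StatusCode, nil
}

func main() {
	w := &writerAfter{client: http.DefaultClient}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancelling the caller's context aborts only this call
	_, err := w.head(ctx, "https://example.com")
	fmt.Println(err) // error wraps context.Canceled; no network I/O happens
}
```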
- existing, err := w.checkExistingBlob(h) + existing, err := w.checkExistingBlob(ctx, h) if err != nil { return err } @@ -424,7 +423,7 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { origin = ml.Reference.Context().RegistryStr() } - location, mounted, err := w.initiateUpload(from, mount, origin) + location, mounted, err := w.initiateUpload(ctx, from, mount, origin) if err != nil { return err } else if mounted { @@ -463,7 +462,7 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { } digest := h.String() - if err := w.commitBlob(location, digest); err != nil { + if err := w.commitBlob(ctx, location, digest); err != nil { return err } logs.Progress.Printf("pushed blob: %s", digest) @@ -491,7 +490,7 @@ func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.Image // TODO(#803): Pipe through remote.WithJobs and upload these in parallel. for _, desc := range index.Manifests { ref := ref.Context().Digest(desc.Digest.String()) - exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType) + exists, err := w.checkExistingManifest(ctx, desc.Digest, desc.MediaType) if err != nil { return err } @@ -581,6 +580,7 @@ func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { // commitManifest does a PUT of the image's manifest. func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error { tryUpload := func() error { + ctx := retry.Never(ctx) raw, desc, err := unpackTaggable(t) if err != nil { return err @@ -656,7 +656,6 @@ func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr e w := writer{ repo: ref.Context(), client: &http.Client{Transport: tr}, - context: o.context, backoff: o.retryBackoff, predicate: o.retryPredicate, } @@ -799,7 +798,6 @@ func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr e w := writer{ repo: repo, client: &http.Client{Transport: tr}, - context: o.context, backoff: o.retryBackoff, predicate: o.retryPredicate, } @@ -870,7 +868,6 @@ func Put(ref name.Reference, t Taggable, options ...Option) error { w := writer{ repo: ref.Context(), client: &http.Client{Transport: tr}, - context: o.context, backoff: o.retryBackoff, predicate: o.retryPredicate, } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go index fa23482397..9dd3e5ffbe 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package stream implements a single-pass streaming v1.Layer. package stream import ( diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go index 21f2236502..363e989fe2 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package types holds common OCI media types. package types // MediaType is an enumeration of the supported mime types that an element of an image might have. 
@@ -24,6 +25,7 @@ const ( OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCILayerZStd MediaType = "application/vnd.oci.image.layer.v1.tar+zstd" OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 4ec00fe7d9..60ef7e9268 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -72,9 +72,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. + // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -715,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. 
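The pprof hunk above introduces `Profile.SetNumLabel` and `RemoveNumLabel`. A minimal usage sketch against that new API (the profile contents and label values here are invented):

```go
package main

import (
	"fmt"

	"github.com/google/pprof/profile"
)

func main() {
	// A minimal profile with one sample; real profiles come from
	// profile.Parse on pprof output.
	p := &profile.Profile{
		SampleType: []*profile.ValueType{{Type: "samples", Unit: "count"}},
		Sample:     []*profile.Sample{{Value: []int64{1}}},
	}

	// Tag every sample with a numeric label and its unit, as added in the
	// hunk above. An empty unit string means "no relevant unit".
	p.SetNumLabel("allocation_size", []int64{4096}, []string{"bytes"})
	fmt.Println(p.Sample[0].NumLabel["allocation_size"]) // [4096]
	fmt.Println(p.Sample[0].NumUnit["allocation_size"])  // [bytes]

	// And remove it again from all samples.
	p.RemoveNumLabel("allocation_size")
	fmt.Println(len(p.Sample[0].NumLabel)) // 0
}
```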
func (s *Sample) DiffBaseSample() bool { diff --git a/vendor/github.com/goreleaser/chglog/.gitattributes b/vendor/github.com/goreleaser/chglog/.gitattributes new file mode 100644 index 0000000000..48b6a6e12e --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/.gitattributes @@ -0,0 +1,6 @@ +* text eol=lf + +# Denote all files that are truly binary and should not be modified. +*.png binary +*.jpg binary +*.ico binary \ No newline at end of file diff --git a/vendor/github.com/goreleaser/chglog/.gitignore b/vendor/github.com/goreleaser/chglog/.gitignore new file mode 100644 index 0000000000..1ec9357e68 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/.gitignore @@ -0,0 +1,9 @@ +.idea/ +*.rpm +coverage.txt +dist +.DS_Store +bin +coverage.out +chglog +!chglog/ \ No newline at end of file diff --git a/vendor/github.com/goreleaser/chglog/.golangci.yaml b/vendor/github.com/goreleaser/chglog/.golangci.yaml new file mode 100644 index 0000000000..486cb074ea --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/.golangci.yaml @@ -0,0 +1,25 @@ +run: + go: "1.19" +linters: + enable: + - thelper + - gofumpt + - tparallel + - unconvert + - unparam + - wastedassign + - revive + - forbidigo + - tagliatelle + - misspell + +linters-settings: + forbidigo: + forbid: + - 'ioutil\.*' + tagliatelle: + case: + use-field-name: false + rules: + yaml: snake + json: snake diff --git a/vendor/github.com/goreleaser/chglog/.golangci.yml b/vendor/github.com/goreleaser/chglog/.golangci.yml new file mode 100644 index 0000000000..2e45a85f0b --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/.golangci.yml @@ -0,0 +1,41 @@ +linters: + enable-all: false + disable: + - godox + - wsl + - testpackage + - gofumpt + - exhaustivestruct +linters-settings: + maligned: + # print struct with more effective memory layout or not, false by default + suggest-new: true + gocyclo: + # minimal code complexity to report, 30 by default (but we recommend 10-20) + min-complexity: 30 + goimports: + # put imports beginning with prefix after 3rd-party packages; + # it's a comma-separated list of prefixes + local-prefixes: github.com/goreleaser/nfpm + govet: + check-shadowing: true + errcheck: + ignore: ^Close.*,fmt:.*,github.com/pkg/errors:^Wrap.*,os:^Setenv$,github.com/spf13/viper:.* + lll: + line-length: 200 + golint: + min-confidence: .8 + nakedret: + max-func-lines: 0 + gocritic: + enabled-tags: + - style + - performance +issues: + exclude-rules: + - text: "G104" # gosec G104 is caught by errcheck + linters: + - gosec + - text: "SA5001" # staticcheck SA5001 is caught by errcheck also + linters: + - staticcheck diff --git a/vendor/github.com/goreleaser/chglog/.goreleaser.yml b/vendor/github.com/goreleaser/chglog/.goreleaser.yml new file mode 100644 index 0000000000..b60937b945 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/.goreleaser.yml @@ -0,0 +1,97 @@ +before: + hooks: + - go mod tidy + +gomod: + proxy: true + +builds: +- main: ./cmd/chglog + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + mod_timestamp: '{{ .CommitTimestamp }}' + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{ .CommitDate }} -X main.builtBy=goreleaser + +changelog: + sort: asc + use: github + filters: + exclude: + - '^test:' + - '^chore' + - 'merge conflict' + - Merge pull request + - Merge remote-tracking branch + - Merge branch + - go mod tidy + groups: + - title: Dependency updates + regexp: "^.*feat\\(deps\\)*:+.*$" + order: 300 + - 
title: 'New Features' + regexp: "^.*feat[(\\w)]*:+.*$" + order: 100 + - title: 'Bug fixes' + regexp: "^.*fix[(\\w)]*:+.*$" + order: 200 + - title: 'Documentation updates' + regexp: "^.*docs[(\\w)]*:+.*$" + order: 400 + - title: Other work + order: 9999 + +archives: + - replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 +brews: + - tap: + owner: goreleaser + name: homebrew-tap + folder: Formula + homepage: https://github.com/goreleaser/chglog + description: chglog is a changelog management library and tool + test: | + system "#{bin}/chglog version" +nfpms: + - file_name_template: '{{ .ProjectName }}_{{ .Arch }}' + homepage: https://github.com/goreleaser/chglog + description: chglog is a changelog management library and tool + maintainer: Dj Gilcrease + license: MIT + vendor: GoReleaser + formats: + - deb + - rpm +scoop: + bucket: + owner: goreleaser + name: scoop-bucket + homepage: https://goreleaser.com + description: Deliver Go binaries as fast and easily as possible + license: MIT + + +release: + footer: | + **Full Changelog**: https://github.com/goreleaser/goreleaser/compare/{{ .PreviousTag }}...{{ .Tag }} + + ## What to do next? + + - Read the [documentation](https://goreleaser.com/intro/) + - Check out the [GoReleaser Pro](https://goreleaser.com/pro) distribution + - Join our [Discord server](https://discord.gg/RGEBtg8vQ6) + - Follow us on [Twitter](https://twitter.com/goreleaser) diff --git a/vendor/github.com/goreleaser/chglog/LICENSE b/vendor/github.com/goreleaser/chglog/LICENSE new file mode 100644 index 0000000000..075798b2ed --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Dj Gilcrease + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goreleaser/chglog/Makefile b/vendor/github.com/goreleaser/chglog/Makefile new file mode 100644 index 0000000000..bdde37f160 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/Makefile @@ -0,0 +1,30 @@ +SOURCE_FILES?=./... +TEST_PATTERN?=. +TEST_OPTIONS?= +TEST_TIMEOUT?=5m +SEMVER?=0.0.0-$(shell whoami) +CI_COMMIT_SHORT_SHA?=$(shell git log --pretty=format:'%h' -n 1) + +test: + go test $(TEST_OPTIONS) -v -failfast -race -coverpkg=./... -covermode=atomic -coverprofile=coverage.out $(SOURCE_FILES) -run $(TEST_PATTERN) -timeout=$(TEST_TIMEOUT) +.PHONY: test + +cover: test + go tool cover -html=coverage.out +.PHONY: cover + +fmt: + go mod tidy + gofumpt -w -l . 
+.PHONY: fmt + +ci: build test +.PHONY: ci + +build: + go build -tags 'release netgo osusergo' \ + -ldflags '$(linker_flags) -s -w -extldflags "-fno-PIC -static" -X main.pkgName=chglog -X main.version=$(SEMVER) -X main.commit=$(CI_COMMIT_SHORT_SHA)' \ + -o chglog ./cmd/chglog/... +.PHONY: build + +.DEFAULT_GOAL := build diff --git a/vendor/github.com/goreleaser/chglog/README.md b/vendor/github.com/goreleaser/chglog/README.md new file mode 100644 index 0000000000..2c9f439fa8 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/README.md @@ -0,0 +1,113 @@ +

+[GoReleaser logo]
+
+# chglog
+
+chglog is a changelog management library and tool
+
+[Badges: Release | Software License | GitHub Actions | Codecov | Go Report Card | Go Doc | Powered By: GoReleaser]
+
+## Why
+
+While there are other tools out there that will create a changelog output as
+part of their workflow, none of the ones I could find did so in a way that
+allowed formatting the output via multiple templates.
+
+The need for multiple output formats was driven by the desire to add changelog
+support to https://github.com/goreleaser/nfpm, since the deb and rpm changelog
+formats are not the same.
+
+## Goals
+
+* [x] be simple to use
+* [x] provide decent default templates for deb, rpm, release and repository
+      style changelog formats
+* [x] be distributed as a single binary
+* [x] reproducible results
+  * [x] depend on as few external things as possible
+  * [x] store the changelog in a transportable format (.yml)
+* [x] be possible to use it as a lib in other go projects (namely
+      [goreleaser](https://goreleaser.com) itself)
+
+## Install
+
+`go get github.com/goreleaser/chglog/cmd/chglog`
+
+## Usage
+
+The first step is to run `chglog config` to initialize a config file
+(`.chglog.yml`) and edit the generated file according to your needs:
+
+```yaml
+conventional-commits: false
+deb:
+  distribution: []
+  urgency: ""
+debug: false
+owner: ""
+package-name: ""
+```
+
+The next step is to run `chglog init`, which generates something like:
+
+```yaml
+- semver: 0.0.1
+  date: 2019-10-18T16:05:33-07:00
+  packager: dj gilcrease
+  changes:
+    - commit: 2c499787328348f09ae1e8f03757c6483b9a938a
+      note: |-
+        oops i forgot to use Conventional Commits style message
+
+        This should NOT break anything even if I am asking to build the changelog using Conventional Commits style message
+    - commit: 3ec1e9a60d07cc060cee727c97ffc8aac5713943
+      note: |-
+        feat: added file two feature
+
+        BREAKING CHANGE: this is a backwards incompatible change
+    - commit: 2cc00abc77d401a541d18c26e5c7fbef1effd3ed
+      note: |-
+        feat: added the fileone feature
+
+        * This is a test repo
+        * so ya!
+```
+
+Then to generate a `CHANGELOG.md` file you would do
+`chglog format --template repo > CHANGELOG.md`.
+
+Now whenever you go to do another release you would do
+`chglog add --version v#.#.#` (the version MUST be in semver format).
+
+And that's it!
+
+## Usage as lib
+
+You can look at the code of chglog itself to see how to use it as a library.
+
+## Status
+
+* alpha
+
+## Donate
+
+Donations are very much appreciated! You can donate/sponsor on the main
+[goreleaser opencollective](https://opencollective.com/goreleaser)! It's
+easy and will surely help the developers at least buy some ☕️ or 🍺!
+
+## Stargazers over time
+
+[![goreleaser/chglog stargazers over time](https://starchart.cc/goreleaser/chglog.svg)](https://starchart.cc/goreleaser/chglog)
+
+---
+
+Would you like to fix something in the documentation? Feel free to open an
+[issue](https://github.com/goreleaser/chglog/issues).
diff --git a/vendor/github.com/goreleaser/chglog/add.go b/vendor/github.com/goreleaser/chglog/add.go
new file mode 100644
index 0000000000..730128018f
--- /dev/null
+++ b/vendor/github.com/goreleaser/chglog/add.go
@@ -0,0 +1,109 @@
+package chglog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/go-git/go-git/v5"
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/object"
+	conventional_commit "gitlab.com/digitalxero/go-conventional-commit"
+)
+
+// ErrNoCommits happens when no commits are found for a given entry.
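The README's "Usage as lib" section above only points at the chglog source. Assembled purely from the APIs added in this diff (`GitRepo`, `InitChangelog`, `Save`, `DebTemplate`, `FormatChangelog`), library usage might look like this sketch (untested; the package name and output paths are invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/goreleaser/chglog"
)

func main() {
	// Open the repository the changelog is built from.
	repo, err := chglog.GitRepo(".", true)
	if err != nil {
		log.Fatal(err)
	}

	// Build entries for every semver tag reachable from HEAD.
	entries, err := chglog.InitChangelog(repo, "", nil, nil, false)
	if err != nil {
		log.Fatal(err)
	}

	// Persist them in the transportable YAML format...
	if err := entries.Save("changelog.yml"); err != nil {
		log.Fatal(err)
	}

	// ...and render a Debian-style changelog from the same data.
	tpl, err := chglog.DebTemplate()
	if err != nil {
		log.Fatal(err)
	}
	out, err := chglog.FormatChangelog(&chglog.PackageChangeLog{
		Name:    "my-package",
		Entries: entries,
	}, tpl)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```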
+var ErrNoCommits = errors.New("no commits found for this entry") + +var signedOffRegEx = regexp.MustCompile(`(?m)(?:^.*Signed-off-by:.*>$)`) + +// AddEntry add a ChangeLog entry to an existing ChangeLogEntries that. +func AddEntry( + gitRepo *git.Repository, + version fmt.Stringer, + owner string, + notes *ChangeLogNotes, + deb *ChangelogDeb, + current ChangeLogEntries, + useConventionalCommits bool, +) (cle ChangeLogEntries, err error) { + var ( + ref *plumbing.Reference + from, to plumbing.Hash + commits []*object.Commit + ) + + if ref, err = gitRepo.Head(); err != nil { + return nil, fmt.Errorf("error adding entry: %w", err) + } + from = ref.Hash() + + to = plumbing.ZeroHash + if len(current) > 0 { + if to, err = GitHashFotTag(gitRepo, current[0].Semver); err != nil { + return nil, fmt.Errorf("error adding entry: %w", err) + } + } + + cle = append(cle, current...) + if commits, err = CommitsBetween(gitRepo, from, to); err != nil { + return nil, fmt.Errorf("error adding entry: %w", err) + } + + if len(commits) == 0 { + return nil, ErrNoCommits + } + + cle = append(cle, CreateEntry(time.Now(), version, owner, notes, deb, commits, useConventionalCommits)) + sort.Sort(sort.Reverse(cle)) + + return cle, nil +} + +func processMsg(msg string) string { + msg = strings.ReplaceAll(strings.ReplaceAll(msg, "\r\n\r\n", "\n\n"), "\r", "") + msg = signedOffRegEx.ReplaceAllString(msg, "") + msg = strings.ReplaceAll(strings.Trim(msg, "\n"), "\n\n\n", "\n") + + return msg +} + +// CreateEntry create a ChangeLog object. +func CreateEntry(date time.Time, version fmt.Stringer, owner string, notes *ChangeLogNotes, deb *ChangelogDeb, commits []*object.Commit, useConventionalCommits bool) (changelog *ChangeLog) { + var cc *conventional_commit.ConventionalCommit + changelog = &ChangeLog{ + Semver: version.String(), + Date: date, + Packager: owner, + Notes: notes, + } + if len(commits) == 0 { + return + } + changelog.Changes = make(ChangeLogChanges, len(commits)) + changelog.Deb = deb + + for idx, c := range commits { + msg := processMsg(c.Message) + if useConventionalCommits { + cc = conventional_commit.ParseConventionalCommit(msg) + } + changelog.Changes[idx] = &ChangeLogChange{ + Commit: c.Hash.String(), + Note: msg, + Committer: &User{ + Name: c.Committer.Name, + Email: c.Committer.Email, + }, + Author: &User{ + Name: c.Author.Name, + Email: c.Author.Email, + }, + ConventionalCommit: cc, + } + } + + return changelog +} diff --git a/vendor/github.com/goreleaser/chglog/files.go b/vendor/github.com/goreleaser/chglog/files.go new file mode 100644 index 0000000000..85ec759276 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/files.go @@ -0,0 +1,33 @@ +package chglog + +import ( + "fmt" + "os" + + "gopkg.in/yaml.v3" +) + +// Parse parse a changelog.yml into ChangeLogEntries. +func Parse(file string) (entries ChangeLogEntries, err error) { + var body []byte + body, err = os.ReadFile(file) // nolint: gosec,gocritic + switch { + case os.IsNotExist(err): + return make(ChangeLogEntries, 0), nil + case err != nil: + return nil, fmt.Errorf("error parsing %s: %w", file, err) + } + + if err = yaml.Unmarshal(body, &entries); err != nil { + return entries, fmt.Errorf("error parsing %s: %w", file, err) + } + + return entries, nil +} + +// Save save ChangeLogEntries to a yml file. 
+func (c *ChangeLogEntries) Save(file string) (err error) { + data, _ := yaml.Marshal(c) + // nolint: gosec,gocritic + return os.WriteFile(file, data, 0o644) +} diff --git a/vendor/github.com/goreleaser/chglog/format.go b/vendor/github.com/goreleaser/chglog/format.go new file mode 100644 index 0000000000..63bc69f844 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/format.go @@ -0,0 +1,17 @@ +package chglog + +import ( + "bytes" + "fmt" + "text/template" +) + +// FormatChangelog format pkgLogs from a text/template. +func FormatChangelog(pkgLogs *PackageChangeLog, tpl *template.Template) (string, error) { + var data bytes.Buffer + if err := tpl.Execute(&data, pkgLogs); err != nil { + return data.String(), fmt.Errorf("error formatting: %w", err) + } + + return data.String(), nil +} diff --git a/vendor/github.com/goreleaser/chglog/git.go b/vendor/github.com/goreleaser/chglog/git.go new file mode 100644 index 0000000000..08f9be1911 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/git.go @@ -0,0 +1,64 @@ +package chglog + +import ( + "errors" + "fmt" + "strings" + + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" +) + +var errReachedToCommit = errors.New("reached to commit") + +// GitRepo open a GitRepo to use to build the changelog from. +func GitRepo(gitPath string, detectDotGit bool) (*git.Repository, error) { + return git.PlainOpenWithOptions(gitPath, &git.PlainOpenOptions{ + DetectDotGit: detectDotGit, + }) +} + +// GitHashFotTag return the git sha for a particular tag. +func GitHashFotTag(gitRepo *git.Repository, tagName string) (hash plumbing.Hash, err error) { + var ref *plumbing.Reference + ref, err = gitRepo.Tag(tagName) + if errors.Is(err, git.ErrTagNotFound) && !strings.HasPrefix(tagName, "v") { + ref, err = gitRepo.Tag("v" + tagName) + } + if err != nil { + return plumbing.ZeroHash, fmt.Errorf("error getting commit for tag %s: %w", tagName, err) + } + + return ref.Hash(), nil +} + +// CommitsBetween return the list of commits between two commits. 
+func CommitsBetween(gitRepo *git.Repository, start, end plumbing.Hash) (commits []*object.Commit, err error) { + var commitIter object.CommitIter + if commitIter, err = gitRepo.Log(&git.LogOptions{ + From: start, + Order: git.LogOrderCommitterTime, + }); err != nil { + return nil, fmt.Errorf("error getting commits between %v & %v: %w", start, end, err) + } + defer commitIter.Close() + err = commitIter.ForEach(func(c *object.Commit) error { + // If no previous tag is found then from and to are equal + if end == start { + return nil + } + if c.Hash == end { + return errReachedToCommit + } + commits = append(commits, c) + + return nil + }) + + if err != nil && !errors.Is(err, errReachedToCommit) { + return nil, fmt.Errorf("error getting commits between %v & %v: %w", start, end, err) + } + + return commits, nil +} diff --git a/vendor/github.com/goreleaser/chglog/init.go b/vendor/github.com/goreleaser/chglog/init.go new file mode 100644 index 0000000000..18eb3e3d86 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/init.go @@ -0,0 +1,136 @@ +package chglog + +import ( + "fmt" + "os" + "sort" + + "github.com/Masterminds/semver/v3" + "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" +) + +func versionsInRepo(gitRepo *git.Repository) (map[plumbing.Hash]*semver.Version, error) { + tagRefs, err := gitRepo.Tags() + if err != nil { + return nil, err + } + + defer tagRefs.Close() + + tags := make(map[plumbing.Hash]*semver.Version) + + err = tagRefs.ForEach(func(t *plumbing.Reference) error { + var ( + version *semver.Version + tag *object.Tag + ) + + tagName := t.Name().Short() + hash := t.Hash() + + if version, err = semver.NewVersion(tagName); err != nil || version == nil { + fmt.Fprintf(os.Stderr, "Warning: unable to parse version from tag: %s : %v\n", tagName, err) + return nil + } + + // If this is an annotated tag look up the hash of the commit and use that. + if tag, err = gitRepo.TagObject(t.Hash()); err == nil { + var c *object.Commit + + if c, err = tag.Commit(); err != nil { + return fmt.Errorf("cannot dereference annotated tag: %s : %w", tagName, err) + } + hash = c.Hash + } + + tags[hash] = version + + return nil + }) + + if err != nil { + return nil, err + } + + return tags, nil +} + +func versionsOnBranch(gitRepo *git.Repository) (map[*semver.Version]plumbing.Hash, error) { + repoVersions, err := versionsInRepo(gitRepo) + if err != nil { + return nil, err + } + + refs, err := gitRepo.Log(&git.LogOptions{}) + if err != nil { + return nil, err + } + + defer refs.Close() + + versions := make(map[*semver.Version]plumbing.Hash) + + err = refs.ForEach(func(c *object.Commit) error { + if v, ok := repoVersions[c.Hash]; ok { + versions[v] = c.Hash + } + return nil + }) + + return versions, err +} + +// InitChangelog create a new ChangeLogEntries from a git repo. 
+func InitChangelog(gitRepo *git.Repository, owner string, notes *ChangeLogNotes, deb *ChangelogDeb, useConventionalCommits bool) (cle ChangeLogEntries, err error) { + var start, end plumbing.Hash + + cle = make(ChangeLogEntries, 0) + end = plumbing.ZeroHash + + versions, err := versionsOnBranch(gitRepo) + if err != nil { + return nil, err + } + + tags := make([]*semver.Version, 0, len(versions)) + for v := range versions { + tags = append(tags, v) + } + + sort.Slice(tags, func(i, j int) bool { return tags[i].LessThan(tags[j]) }) + + for _, version := range tags { + var ( + commits []*object.Commit + commitObject *object.Commit + ) + + if version.Prerelease() != "" { + // Do not need change logs for pre-release entries + continue + } + + start = versions[version] + + if commitObject, err = gitRepo.CommitObject(start); err != nil { + return nil, fmt.Errorf("unable to fetch commit from tag %v: %w", version.Original(), err) + } + + if owner == "" { + owner = fmt.Sprintf("%s <%s>", commitObject.Committer.Name, commitObject.Committer.Email) + } + if commits, err = CommitsBetween(gitRepo, start, end); err != nil { + return nil, fmt.Errorf("unable to find commits between %s & %s: %w", end, start, err) + } + + changelog := CreateEntry(commitObject.Committer.When, version, owner, notes, deb, commits, useConventionalCommits) + cle = append(cle, changelog) + end = start + } + + sort.Sort(sort.Reverse(cle)) + + return cle, nil +} diff --git a/vendor/github.com/goreleaser/chglog/templates.go b/vendor/github.com/goreleaser/chglog/templates.go new file mode 100644 index 0000000000..2a1fa9af48 --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/templates.go @@ -0,0 +1,74 @@ +package chglog + +import ( + "text/template" + + "github.com/Masterminds/sprig" +) + +const ( + rpmTpl = ` +{{- range .Entries }}{{$version := semver .Semver}} +* {{ date_in_zone "Mon Jan 2 2006" .Date "UTC" }} {{ .Packager }} - {{ $version.Major }}.{{ $version.Minor }}.{{ $version.Patch }}{{if $version.Prerelease}}-{{ $version.Prerelease }}{{end}} +{{- range .Changes }}{{$note := splitList "\n" .Note}} + - {{ first $note }}{{ range $i,$n := (rest $note) }}{{if ne $n "\n"}} {{$n}}{{end}} + {{end}} +{{- end }} +{{ end }} +` + debTpl = `{{- $name := .Name}} +{{- range .Entries }} +{{ $name }} ({{ .Semver }}){{if .Deb}} {{default "" (.Deb.Distributions | join " ")}}; urgency={{default "low" .Deb.Urgency}}{{end}} + {{- range .Changes }}{{$note := splitList "\n" .Note}} + * {{ first $note }} + {{- range $i,$n := (rest $note) }} + {{- if ne (trim $n) ""}} + - {{$n}}{{end}} +{{- end}}{{end}} + + -- {{ .Packager }} {{ date_in_zone "Mon, 02 Jan 2006 03:04:05 -0700" .Date "UTC" }} +{{ end }} +` + releaseTpl = ` +Changelog +========= +{{- with (first .Entries)}} +{{range .Changes }}{{$note := splitList "\n" .Note}} +{{substr 0 8 .Commit}} {{ first $note }}{{end}} +{{ end}} +` + repoTpl = ` +{{- range .Entries }} +{{ .Semver }} +============= +{{ date_in_zone "2006-01-02" .Date "UTC" }} +{{range .Changes }}{{$note := splitList "\n" .Note}} +* {{ first $note }} ({{substr 0 8 .Commit}}){{end}} +{{ end}} +` +) + +// LoadTemplateData load a template from string with all of the sprig.TxtFuncMap loaded. +func LoadTemplateData(data string) (*template.Template, error) { + return template.New("base").Funcs(sprig.TxtFuncMap()).Parse(data) +} + +// DebTemplate load default debian template. +func DebTemplate() (*template.Template, error) { + return LoadTemplateData(debTpl) +} + +// RPMTemplate load default RPM template. 
+func RPMTemplate() (*template.Template, error) { + return LoadTemplateData(rpmTpl) +} + +// ReleaseTemplate load default release template. +func ReleaseTemplate() (*template.Template, error) { + return LoadTemplateData(releaseTpl) +} + +// RepoTemplate load default repo template. +func RepoTemplate() (*template.Template, error) { + return LoadTemplateData(repoTpl) +} diff --git a/vendor/github.com/goreleaser/chglog/types.go b/vendor/github.com/goreleaser/chglog/types.go new file mode 100644 index 0000000000..9f6246a7dd --- /dev/null +++ b/vendor/github.com/goreleaser/chglog/types.go @@ -0,0 +1,96 @@ +// Package chglog contains the public API for working with a changlog.yml file +package chglog + +import ( + "time" + + "github.com/Masterminds/semver/v3" + conventional_commit "gitlab.com/digitalxero/go-conventional-commit" +) + +// ChangeLogEntries list of ChangeLog entries. +type ChangeLogEntries []*ChangeLog + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c ChangeLogEntries) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. If checks if one is less than the other. +func (c ChangeLogEntries) Less(i, j int) bool { + v1, err := semver.NewVersion(c[i].Semver) + if err != nil { + return true + } + v2, err := semver.NewVersion(c[j].Semver) + if err != nil { + return false + } + + return v1.LessThan(v2) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c ChangeLogEntries) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +// PackageChangeLog used for the formatting API. +type PackageChangeLog struct { + Name string `yaml:"name"` + Entries ChangeLogEntries `yaml:"entries"` +} + +// ChangeLog a single changelog entry. +type ChangeLog struct { + ChangeLogOverridables `yaml:",inline"` + Semver string `yaml:"semver"` + Date time.Time `yaml:"date"` + Packager string `yaml:"packager"` + Notes *ChangeLogNotes `yaml:"notes,omitempty"` + Changes ChangeLogChanges `yaml:"changes,omitempty"` +} + +// ChangeLogOverridables contains potential format specific fields. +type ChangeLogOverridables struct { + Deb *ChangelogDeb `yaml:"deb,omitempty"` +} + +// ChangelogDeb contains fields specific to the debian changelog format +// https://www.debian.org/doc/debian-policy/ch-source.html#s-dpkgchangelog +type ChangelogDeb struct { + Urgency string `yaml:"urgency"` + Distributions []string `yaml:"distributions"` +} + +// ChangeLogNotes contains a potential header/footer string for output formatting. +type ChangeLogNotes struct { + Header *string `yaml:"header,omitempty"` + Footer *string `yaml:"footer,omitempty"` +} + +// ChangeLogChanges list of individual changes. +type ChangeLogChanges []*ChangeLogChange + +// ChangeLogChange an individual change. +type ChangeLogChange struct { + Commit string `yaml:"commit"` + Note string `yaml:"note"` + // Author is the original author of the commit. + Author *User `yaml:"author,omitempty"` + // Committer is the one performing the commit, might be different from + // Author. + Committer *User `yaml:"committer,omitempty"` + ConventionalCommit *conventional_commit.ConventionalCommit `yaml:"conventional_commit,omitempty"` +} + +// User is used to identify who created a commit or tag. +type User struct { + // Name represents a person name. It is an arbitrary string. + Name string `yaml:"name"` + // Email is an email, but it cannot be assumed to be well-formed. 
+ Email string `yaml:"email"` +} diff --git a/vendor/github.com/goreleaser/fileglob/.gitignore b/vendor/github.com/goreleaser/fileglob/.gitignore new file mode 100644 index 0000000000..3f58b26aa5 --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ +coverage.txt diff --git a/vendor/github.com/goreleaser/fileglob/.golangci.yml b/vendor/github.com/goreleaser/fileglob/.golangci.yml new file mode 100644 index 0000000000..45e61830c6 --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/.golangci.yml @@ -0,0 +1,13 @@ +linters: + enable-all: true + disable: + - godox + - wsl + - gomnd + - testpackage + - nolintlint + - gofumpt + - nlreturn + - gci + - exhaustivestruct + - varnamelen diff --git a/vendor/github.com/goreleaser/fileglob/.goreleaser.yml b/vendor/github.com/goreleaser/fileglob/.goreleaser.yml new file mode 100644 index 0000000000..96e6403d24 --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/.goreleaser.yml @@ -0,0 +1,20 @@ +builds: +- skip: true +changelog: + sort: asc + use: github + filters: + exclude: + - Merge pull request + - Merge remote-tracking branch + - Merge branch + - go mod tidy + groups: + - title: 'New Features' + regexp: "^.*feat[(\\w)]*:+.*$" + order: 0 + - title: 'Bug fixes' + regexp: "^.*fix[(\\w)]*:+.*$" + order: 10 + - title: Other work + order: 999 diff --git a/vendor/github.com/goreleaser/fileglob/LICENSE b/vendor/github.com/goreleaser/fileglob/LICENSE new file mode 100644 index 0000000000..4070691cdb --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Carlos Alexandro Becker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goreleaser/fileglob/README.md b/vendor/github.com/goreleaser/fileglob/README.md new file mode 100644 index 0000000000..430e144d5d --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/README.md @@ -0,0 +1,40 @@ +

+[GoReleaser logo]
+
+# fileglob
+
+A file globbing library.
+
+[Badges: Release | Software License | GitHub Actions | Codecov | Go Report Card | Go Doc | Powered By: GoReleaser]

    + +## What + +`fileglob` is a glob library that uses [gobwas/glob](https://github.com/gobwas/glob) underneath +and returns only matching files or direcories, depending on the configuration. Due to this great foundation, `fileglob` supports: + +* Asterisk wildcards (`*`) +* Super-asterisk wildcards (`**`) +* Single symbol wildcards (`?`) +* Character list matchers with negation and ranges (`[abc]`, `[!abc]`, `[a-c]`) +* Alternative matchers (`{a,b}`) +* Nested globbing (`{a,[bc]}`) +* Escapable wildcards (`\{a\}/\*` and `fileglob.QuoteMeta(pattern)`) + +By also building on top of `fs.FS`, a range of alternative filesystems as well as custom filesystems are supported. + +## Why + +[gobwas/glob](https://github.com/gobwas/glob) is very well implemented: it has +a lexer, compiler, and all that, which seems like a better approach than most +libraries do: regex. + +It doesn't have a `Walk` method though, and we needed it +[in a couple of places](https://github.com/goreleaser/fileglob/issues/232). +So we decided to implement it ourselves, a little bit based on how +[mattn/go-zglob](http://github.com/mattn/go-zglob) works. diff --git a/vendor/github.com/goreleaser/fileglob/doc.go b/vendor/github.com/goreleaser/fileglob/doc.go new file mode 100644 index 0000000000..4bdb1f8698 --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/doc.go @@ -0,0 +1,2 @@ +// Package fileglob provides a filesystem glob API. +package fileglob diff --git a/vendor/github.com/goreleaser/fileglob/glob.go b/vendor/github.com/goreleaser/fileglob/glob.go new file mode 100644 index 0000000000..e30a9ab5c0 --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/glob.go @@ -0,0 +1,242 @@ +package fileglob + +import ( + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/gobwas/glob" +) + +const ( + separatorRune = '/' + separatorString = string(separatorRune) +) + +type globOptions struct { + fs fs.FS + + // if matchDirectories directly is set to true a matching directory will + // be treated just like a matching file. If set to false, a matching directory + // will auto-match all files inside instead of the directory itself. + matchDirectoriesDirectly bool + + prefix string + + pattern string +} + +// OptFunc is a function that allow to customize Glob. +type OptFunc func(opts *globOptions) + +// WithFs allows to provide another fs.FS implementation to Glob. +func WithFs(f fs.FS) OptFunc { + return func(opts *globOptions) { + opts.fs = f + } +} + +// MaybeRootFS setups fileglob to walk from the root directory (/) or +// volume (on windows) if the given pattern is an absolute path. +// +// Result will also be prepended with the root path or volume. +func MaybeRootFS(opts *globOptions) { + if !filepath.IsAbs(opts.pattern) { + return + } + prefix := "" + if strings.HasPrefix(opts.pattern, separatorString) { + prefix = separatorString + } + if vol := filepath.VolumeName(opts.pattern); vol != "" { + prefix = vol + "/" + } + if prefix != "" { + opts.prefix = prefix + opts.fs = os.DirFS(prefix) + } +} + +// WriteOptions write the current options to the given writer. +func WriteOptions(w io.Writer) OptFunc { + return func(opts *globOptions) { + _, _ = fmt.Fprintf(w, "%+v", opts) + } +} + +// MatchDirectoryIncludesContents makes a match on a directory match all +// files inside it as well. +// +// This is the default behavior. +// +// Also check MatchDirectoryAsFile. 
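Given `Glob` and the option functions defined above, typical fileglob calls might look like the following sketch (the patterns and directories are invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/goreleaser/fileglob"
)

func main() {
	// Match files; a matching directory expands to the files inside it
	// (the default, MatchDirectoryIncludesContents).
	matches, err := fileglob.Glob("./testdata/**/*.txt")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(matches)

	// Treat a matching directory as a result itself instead.
	dirs, err := fileglob.Glob("./testdata/{a,b}", fileglob.MatchDirectoryAsFile)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dirs)

	// Escape all glob metacharacters when the pattern is a literal path.
	literal, err := fileglob.Glob("./weird{name}", fileglob.QuoteMeta)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(literal)
}
```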
    +func MatchDirectoryIncludesContents(opts *globOptions) { + opts.matchDirectoriesDirectly = false +} + +// MatchDirectoryAsFile makes a match on a directory match its name only. + +// Also check MatchDirectoryIncludesContents. +func MatchDirectoryAsFile(opts *globOptions) { + opts.matchDirectoriesDirectly = true +} + +// QuoteMeta quotes all glob pattern meta characters inside the argument text. +// For example, QuoteMeta for a pattern `{foo*}` sets the pattern to `\{foo\*\}`. +func QuoteMeta(opts *globOptions) { + opts.pattern = glob.QuoteMeta(opts.pattern) +} + +// toNixPath converts the path to a nix-style path. +// Windows-style path separators are escape characters, so they cause issues with the compiled glob. +func toNixPath(s string) string { + return filepath.ToSlash(filepath.Clean(s)) +} + +// Glob returns all files that match the given pattern in the current directory. +// If the given pattern indicates an absolute path, it will glob from `/`. +// If the given pattern starts with `../`, it will resolve to its absolute path and glob from `/`. +func Glob(pattern string, opts ...OptFunc) ([]string, error) { // nolint:funlen,cyclop + var matches []string + + if strings.HasPrefix(pattern, "../") { + p, err := filepath.Abs(pattern) + if err != nil { + return matches, fmt.Errorf("failed to resolve pattern: %s: %w", pattern, err) + } + pattern = filepath.ToSlash(p) + } + + options := compileOptions(opts, pattern) + + pattern = strings.TrimSuffix(strings.TrimPrefix(options.pattern, options.prefix), separatorString) + matcher, err := glob.Compile(pattern, separatorRune) + if err != nil { + return matches, fmt.Errorf("compile glob pattern: %w", err) + } + + prefix, err := staticPrefix(pattern) + if err != nil { + return nil, fmt.Errorf("cannot determine static prefix: %w", err) + } + + // Check if the file is a valid symlink without following it. + // This works only for valid absolute or relative file paths; in other words, it will fail when the WithFs() option is used. + if patternInfo, err := os.Lstat(pattern); err == nil { // nolint:govet + if patternInfo.Mode()&os.ModeSymlink == os.ModeSymlink { + return cleanFilepaths([]string{pattern}, options.prefix), nil + } + } + + prefixInfo, err := fs.Stat(options.fs, prefix) + if errors.Is(err, fs.ErrNotExist) { + if !ContainsMatchers(pattern) { + // glob contains no dynamic matchers so prefix is the file name that + // the glob references directly. When the glob explicitly references + // a single non-existing file, return an error for the user to check.
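+            // For example, Glob("./missing.txt") reports fs.ErrNotExist, while
+            // Glob("./missing-*") simply returns an empty result and no error.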
    + return []string{}, fmt.Errorf(`matching "%s%s": %w`, options.prefix, prefix, fs.ErrNotExist) + } + + return []string{}, nil + } + if err != nil { + return nil, fmt.Errorf("stat static prefix %s%s: %w", options.prefix, prefix, err) + } + + if !prefixInfo.IsDir() { + // if the prefix is a file, it either has to be + // the only match, or nothing matches at all + if matcher.Match(prefix) { + return cleanFilepaths([]string{prefix}, options.prefix), nil + } + + return []string{}, nil + } + + if err := fs.WalkDir(options.fs, prefix, func(path string, info fs.DirEntry, err error) error { + if err != nil { + return err + } + + // The glob AST from github.com/gobwas/glob only works properly with Linux paths + path = toNixPath(path) + if !matcher.Match(path) { + return nil + } + + if info.IsDir() { + if options.matchDirectoriesDirectly { + matches = append(matches, path) + return nil + } + + // a direct match on a directory implies that all files inside + // match if options.matchDirectoriesDirectly is false + filesInDir, err := filesInDirectory(options, path) + if err != nil { + return err + } + + matches = append(matches, filesInDir...) + return fs.SkipDir + } + + matches = append(matches, path) + + return nil + }); err != nil { + return nil, fmt.Errorf("glob failed: %w", err) + } + + return cleanFilepaths(matches, options.prefix), nil +} + +func compileOptions(optFuncs []OptFunc, pattern string) *globOptions { + opts := &globOptions{ + fs: os.DirFS("."), + prefix: "./", + pattern: pattern, + } + + for _, apply := range optFuncs { + apply(opts) + } + + return opts +} + +func filesInDirectory(options *globOptions, dir string) ([]string, error) { + var files []string + + err := fs.WalkDir(options.fs, dir, func(path string, info fs.DirEntry, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + path = toNixPath(path) + files = append(files, path) + return nil + }) + if err != nil { + return files, fmt.Errorf("failed to get files in directory: %w", err) + } + return files, nil +} + +func cleanFilepaths(paths []string, prefix string) []string { + if prefix == "./" { + // if the prefix is relative, no prefix and ./ are the same thing, so ignore it + return paths + } + result := make([]string, len(paths)) + for i, p := range paths { + result[i] = prefix + p + } + return result +} diff --git a/vendor/github.com/goreleaser/fileglob/prefix.go b/vendor/github.com/goreleaser/fileglob/prefix.go new file mode 100644 index 0000000000..230ee5019b --- /dev/null +++ b/vendor/github.com/goreleaser/fileglob/prefix.go @@ -0,0 +1,96 @@ +package fileglob + +import ( + "fmt" + "strings" + + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +// ValidPattern determines whether a pattern is valid. It returns the parser +// error if the pattern is invalid and nil otherwise. +func ValidPattern(pattern string) error { + _, err := ast.Parse(lexer.NewLexer(pattern)) + return err // nolint:wrapcheck +} + +// ContainsMatchers determines whether the pattern contains any type of glob +// matcher. It will also return false if the pattern is an invalid expression. +func ContainsMatchers(pattern string) bool { + rootNode, err := ast.Parse(lexer.NewLexer(pattern)) + if err != nil { + return false + } + + _, isStatic := staticText(rootNode) + return !isStatic +} + +// staticText returns the static string matcher represented by the AST unless +// it contains dynamic matchers (wildcards, etc.). In this case the ok return +// value is false.
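+//
+// For instance, a parsed pattern part "foo" yields ("foo", true), while
+// "f*o" yields ("", false).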
+func staticText(node *ast.Node) (text string, ok bool) { + // nolint:exhaustive + switch node.Kind { + case ast.KindPattern: + text := "" + + for _, child := range node.Children { + childText, ok := staticText(child) + if !ok { + return "", false + } + + text += childText + } + + return text, true + case ast.KindText: + t, ok := node.Value.(ast.Text) + if !ok { + return "", false + } + return t.Text, true + case ast.KindNothing: + return "", true + default: + return "", false + } +} + +// staticPrefix returns the file path inside the pattern up +// to the first path element that contains a wildcard. +func staticPrefix(pattern string) (string, error) { + parts := strings.Split(pattern, separatorString) + + // nolint:prealloc + var prefixPath []string + for _, part := range parts { + if part == "" { + continue + } + + rootNode, err := ast.Parse(lexer.NewLexer(part)) + if err != nil { + return "", fmt.Errorf("parse glob pattern: %w", err) + } + + staticPart, ok := staticText(rootNode) + if !ok { + break + } + + prefixPath = append(prefixPath, staticPart) + } + prefix := strings.Join(prefixPath, separatorString) + if len(pattern) > 0 && rune(pattern[0]) == separatorRune && !strings.HasPrefix(prefix, separatorString) { + prefix = separatorString + prefix + } + + if prefix == "" { + prefix = "." + } + + return prefix, nil +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/.gitattributes b/vendor/github.com/goreleaser/nfpm/v2/.gitattributes new file mode 100644 index 0000000000..3021bad2b7 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/.gitattributes @@ -0,0 +1,7 @@ +* text eol=lf + +# Denote all files that are truly binary and should not be modified. +*.png binary +*.jpg binary +*.ico binary +www/docs/static/schema.json linguist-generated=true diff --git a/vendor/github.com/goreleaser/nfpm/v2/.gitignore b/vendor/github.com/goreleaser/nfpm/v2/.gitignore new file mode 100644 index 0000000000..5f0850b704 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/.gitignore @@ -0,0 +1,20 @@ +vendor +*.rpm +*.deb +!dummy.deb +*.apk +coverage.txt +dist +nfpm.yaml +.DS_Store +bin +coverage.out +/nfpm +www/site +.idea/ +testdata/acceptance/tmp/ +completions/ +.task/ +cosign.* +manpages +output.json diff --git a/vendor/github.com/goreleaser/nfpm/v2/.golangci.yml b/vendor/github.com/goreleaser/nfpm/v2/.golangci.yml new file mode 100644 index 0000000000..538f529c7b --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/.golangci.yml @@ -0,0 +1,6 @@ +run: + timeout: 5m +linters: + enable: + - thelper + - gofumpt diff --git a/vendor/github.com/goreleaser/nfpm/v2/.goreleaser.yml b/vendor/github.com/goreleaser/nfpm/v2/.goreleaser.yml new file mode 100644 index 0000000000..4333867d11 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/.goreleaser.yml @@ -0,0 +1,256 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json + +env: + - GO111MODULE=on + +before: + hooks: + - go mod tidy + - ./scripts/completions.sh + - ./scripts/manpages.sh + +gomod: + proxy: true + +builds: +- main: ./cmd/nfpm + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 + mod_timestamp: '{{ .CommitTimestamp }}' + flags: + - -trimpath + ldflags: + - -s -w -X main.version={{ .Version }} -X main.commit={{ .Commit }} -X main.date={{ .CommitDate }} -X main.builtBy=goreleaser + +dockers: + - image_templates: + - 'goreleaser/nfpm:{{ .Tag }}-amd64' + - 'ghcr.io/goreleaser/nfpm:{{ .Tag }}-amd64' + dockerfile: Dockerfile + use: buildx + build_flag_templates: + - 
"--pull" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.name={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.source={{.GitURL}}" + - "--platform=linux/amd64" + - image_templates: + - 'goreleaser/nfpm:{{ .Tag }}-arm64v8' + - 'ghcr.io/goreleaser/nfpm:{{ .Tag }}-arm64v8' + dockerfile: Dockerfile + use: buildx + build_flag_templates: + - "--pull" + - "--label=org.opencontainers.image.created={{.Date}}" + - "--label=org.opencontainers.image.name={{.ProjectName}}" + - "--label=org.opencontainers.image.revision={{.FullCommit}}" + - "--label=org.opencontainers.image.version={{.Version}}" + - "--label=org.opencontainers.image.source={{.GitURL}}" + - "--platform=linux/arm64/v8" + goarch: arm64 + +docker_manifests: + - name_template: 'goreleaser/nfpm:{{ .Tag }}' + image_templates: + - 'goreleaser/nfpm:{{ .Tag }}-amd64' + - 'goreleaser/nfpm:{{ .Tag }}-arm64v8' + - name_template: 'ghcr.io/goreleaser/nfpm:{{ .Tag }}' + image_templates: + - 'ghcr.io/goreleaser/nfpm:{{ .Tag }}-amd64' + - 'ghcr.io/goreleaser/nfpm:{{ .Tag }}-arm64v8' + - name_template: 'goreleaser/nfpm:latest' + image_templates: + - 'goreleaser/nfpm:{{ .Tag }}-amd64' + - 'goreleaser/nfpm:{{ .Tag }}-arm64v8' + - name_template: 'ghcr.io/goreleaser/nfpm:latest' + image_templates: + - 'ghcr.io/goreleaser/nfpm:{{ .Tag }}-amd64' + - 'ghcr.io/goreleaser/nfpm:{{ .Tag }}-arm64v8' + +archives: +- replacements: + darwin: Darwin + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + format_overrides: + - goos: windows + format: zip + files: + - README.md + - LICENSE.md + - completions/* + - manpages/* + +brews: +- tap: + owner: goreleaser + name: homebrew-tap + folder: Formula + homepage: https://nfpm.goreleaser.com + description: nFPM is a simple, 0-dependencies, deb, rpm and apk packager. + license: MIT + test: | + system "#{bin}/nfpm -v" + install: |- + bin.install "nfpm" + bash_completion.install "completions/nfpm.bash" => "nfpm" + zsh_completion.install "completions/nfpm.zsh" => "_nfpm" + fish_completion.install "completions/nfpm.fish" + man1.install "manpages/nfpm.1.gz" + +scoop: + bucket: + owner: goreleaser + name: scoop-bucket + homepage: https://nfpm.goreleaser.com + description: nFPM is a simple, 0-dependencies, deb, rpm and apk packager. + license: MIT + +nfpms: +- file_name_template: '{{ .ProjectName }}_{{ .Arch }}' + homepage: https://nfpm.goreleaser.com + description: |- + Simple, 0-dependencies, deb, rpm and apk packager. + nFPM (not FPM) is configurable via YAML and does not need any + packaging software installed. 
    + maintainer: Carlos Alexandro Becker + license: MIT + vendor: GoReleaser + formats: + - apk + - deb + - rpm + bindir: /usr/bin + section: utils + contents: + - src: ./completions/nfpm.bash + dst: /usr/share/bash-completion/completions/nfpm + file_info: + mode: 0644 + - src: ./completions/nfpm.fish + dst: /usr/share/fish/vendor_completions.d/nfpm.fish + file_info: + mode: 0644 + - src: ./completions/nfpm.zsh + dst: /usr/share/zsh/vendor-completions/_nfpm + file_info: + mode: 0644 + - src: ./manpages/nfpm.1.gz + dst: /usr/share/man/man1/nfpm.1.gz + file_info: + mode: 0644 + - src: ./LICENSE.md + dst: /usr/share/doc/nfpm/copyright + file_info: + mode: 0644 + - src: .lintian-overrides + dst: ./usr/share/lintian/overrides/nfpm + packager: deb + file_info: + mode: 0644 + +aurs: + - homepage: https://nfpm.goreleaser.com + description: nFPM is a simple, 0-dependencies, deb, rpm and apk packager. + maintainers: + - 'Fernandez Ludovic ' + - 'Carlos Alexandro Becker ' + license: MIT + private_key: '{{ .Env.AUR_KEY }}' + git_url: 'ssh://aur@aur.archlinux.org/nfpm-bin.git' + package: |- + # bin + install -Dm755 "./nfpm" "${pkgdir}/usr/bin/nfpm" + # license + install -Dm644 "./LICENSE.md" "${pkgdir}/usr/share/licenses/nfpm/LICENSE" + # completions + mkdir -p "${pkgdir}/usr/share/bash-completion/completions/" + mkdir -p "${pkgdir}/usr/share/zsh/site-functions/" + mkdir -p "${pkgdir}/usr/share/fish/vendor_completions.d/" + install -Dm644 "./completions/nfpm.bash" "${pkgdir}/usr/share/bash-completion/completions/nfpm" + install -Dm644 "./completions/nfpm.zsh" "${pkgdir}/usr/share/zsh/site-functions/_nfpm" + install -Dm644 "./completions/nfpm.fish" "${pkgdir}/usr/share/fish/vendor_completions.d/nfpm.fish" + # man pages + install -Dm644 "./manpages/nfpm.1.gz" "${pkgdir}/usr/share/man/man1/nfpm.1.gz" + +furies: + - account: goreleaser + +sboms: +- artifacts: archive +signs: +- cmd: cosign + env: + - COSIGN_EXPERIMENTAL=1 + certificate: '${artifact}.pem' + output: true + artifacts: checksum + args: + - sign-blob + - '--output-certificate=${certificate}' + - '--output-signature=${signature}' + - '${artifact}' +docker_signs: +- cmd: cosign + env: + - COSIGN_EXPERIMENTAL=1 + artifacts: manifests + output: true + args: + - 'sign' + - '${artifact}' + +changelog: + sort: asc + use: github + filters: + exclude: + - '^docs:' + - '^test:' + - '^chore' + - Merge pull request + - Merge remote-tracking branch + - Merge branch + - go mod tidy + groups: + - title: 'New Features' + regexp: "^.*feat[(\\w)]*:+.*$" + order: 0 + - title: 'Bug fixes' + regexp: "^.*fix[(\\w)]*:+.*$" + order: 10 + - title: Other work + order: 999 + +checksum: + name_template: 'checksums.txt' + +release: + footer: | + **Full Changelog**: https://github.com/goreleaser/nfpm/compare/{{ .PreviousTag }}...{{ .Tag }} + + --- + + - Check out [GoReleaser](https://goreleaser.com): it integrates nFPM into the release pipeline of your Go projects. + +milestones: + - close: true + +announce: + twitter: + enabled: true + message_template: "nFPM {{ .Tag }} was just released! See what's new: https://github.com/goreleaser/nfpm/releases/tag/{{ .Tag }}" + discord: + enabled: true + message_template: "nFPM {{ .Tag }} is out!
See what's new: https://github.com/goreleaser/nfpm/releases/tag/{{ .Tag }}" diff --git a/vendor/github.com/goreleaser/nfpm/v2/.lintian-overrides b/vendor/github.com/goreleaser/nfpm/v2/.lintian-overrides new file mode 100644 index 0000000000..c4f83216d1 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/.lintian-overrides @@ -0,0 +1,2 @@ +nfpm: statically-linked-binary +nfpm: changelog-file-missing-in-native-package diff --git a/vendor/github.com/goreleaser/nfpm/v2/CODE_OF_CONDUCT.md b/vendor/github.com/goreleaser/nfpm/v2/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..fcf20b2cb6 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at root@carlosbecker.com. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. 
    + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/goreleaser/nfpm/v2/CONTRIBUTING.md b/vendor/github.com/goreleaser/nfpm/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..7a5e912f2c --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/CONTRIBUTING.md @@ -0,0 +1,88 @@ +# Contributing + +By participating in this project, you agree to abide by our [code of conduct](https://github.com/goreleaser/nfpm/blob/main/CODE_OF_CONDUCT.md). + +## Set up your machine + +`nfpm` is written in [Go](https://golang.org/). + +Prerequisites: + +- [Task](https://taskfile.dev/#/installation) +- [Go 1.19+](https://golang.org/doc/install) +- [Docker](https://www.docker.com/) +- `gpg` (probably already installed on your system) + +Clone `nfpm` from source: + +```sh +git clone git@github.com:goreleaser/nfpm.git +cd nfpm +``` + +Install the build and lint dependencies: + +```console +task setup +``` + +A good way of making sure everything is all right is running the test suite: + +```console +task test +``` + +If you are seeing `standard_init_linux.go:211: exec user process caused "exec format error"` in the ARM tests: + +```console +sudo docker run --rm --privileged hypriot/qemu-register +``` + +## Test your change + +You can create a branch for your changes and build from source as you go: + +```console +task build +``` + +When you are satisfied with the changes, we suggest you run: + +```console +task ci +``` + +This runs all the linters and tests. + +## Create a commit + +Commit messages should be well-formatted. +Start your commit message with the type. Choose one of the following: +`feat`, `fix`, `docs`, `style`, `refactor`, `perf`, `test`, `chore`, `revert`, `add`, `remove`, `move`, `bump`, `update`, `release` + +After a colon, you should give the message a title, starting with an uppercase letter and ending without a dot. +Keep the width of the text at 72 chars. +The title must be followed by a newline, then a more detailed description. + +Please reference any GitHub issues on the last line of the commit message (e.g. `See #123`, `Closes #123`, `Fixes #123`). + +An example: + +``` +docs: Add example for --release-notes flag + +I added an example to the docs of the `--release-notes` flag to make +the usage more clear. The example is a realistic use case and might +help others to generate their own changelog. + +See #284 +``` + +## Submit a pull request + +Push your branch to your `nfpm` fork and open a pull request against the main branch. + +## Financial contributions + +We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/goreleaser). +Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" into the ledger of our open collective by the core contributors, and the person who filed the expense will be reimbursed.
diff --git a/vendor/github.com/goreleaser/nfpm/v2/Dockerfile b/vendor/github.com/goreleaser/nfpm/v2/Dockerfile new file mode 100644 index 0000000000..7d82b1aaf0 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/Dockerfile @@ -0,0 +1,4 @@ +FROM alpine +COPY nfpm_*.apk /tmp/ +RUN apk add --allow-untrusted /tmp/nfpm_*.apk +ENTRYPOINT ["/usr/bin/nfpm"] diff --git a/vendor/github.com/goreleaser/nfpm/v2/LICENSE.md b/vendor/github.com/goreleaser/nfpm/v2/LICENSE.md new file mode 100644 index 0000000000..127ad97dc0 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 GoReleaser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/goreleaser/nfpm/v2/README.md b/vendor/github.com/goreleaser/nfpm/v2/README.md new file mode 100644 index 0000000000..2c5b0a8638 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/README.md @@ -0,0 +1,56 @@ +

    + [image: GoReleaser logo]

    nFPM

    +

    nFPM is a simple, 0-dependencies, deb, rpm and apk packager.

    +

    + [badges: Release, Software License, GitHub Actions, Codecov, Go Report Card, Go Doc, Powered By: GoReleaser]

    +

    + +## Why + +While [fpm][] is great, for me, it is a bummer that it depends on `ruby`, `tar` +and other software. + +I wanted something that could be used as a binary and/or as a library and that +was really simple. + +So I created nFPM: a simpler, 0-dependency, as-little-assumptions-as-possible alternative to fpm. + +## Usage + +Check the documentation at https://nfpm.goreleaser.com + +## Special thanks 🙏 + +Thanks to the [fpm][] authors for fpm, which inspired nFPM a lot. + +## Community + +Do you have questions, need support, or just want to talk about GoReleaser/nFPM? + +Here are ways to get in touch with the GoReleaser community: + +[![Join Discord](https://img.shields.io/badge/Join_our_Discord_server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/RGEBtg8vQ6) +[![Follow Twitter](https://img.shields.io/badge/follow_on_twitter-1DA1F2?style=for-the-badge&logo=twitter&logoColor=white)](https://twitter.com/goreleaser) +[![GitHub Discussions](https://img.shields.io/badge/GITHUB_DISCUSSION-181717?style=for-the-badge&logo=github&logoColor=white)](https://github.com/goreleaser/nfpm/discussions) + +## Donate + +Donations are very much appreciated! You can donate/sponsor on the main +[goreleaser opencollective](https://opencollective.com/goreleaser)! It's +easy and will surely help the developers at least buy some ☕️ or 🍺! + +## Stargazers over time + +[![goreleaser/nfpm stargazers over time](https://starchart.cc/goreleaser/nfpm.svg)](https://starchart.cc/goreleaser/nfpm) + +--- + +[fpm]: https://github.com/jordansissel/fpm diff --git a/vendor/github.com/goreleaser/nfpm/v2/SECURITY.md b/vendor/github.com/goreleaser/nfpm/v2/SECURITY.md new file mode 100644 index 0000000000..d2ed6fcc29 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +## Supported Versions + +Only the latest stable version is supported at any given point. + +## Reporting a Vulnerability + +Vulnerabilities can be disclosed via email to carlos@becker.software diff --git a/vendor/github.com/goreleaser/nfpm/v2/Taskfile.yml b/vendor/github.com/goreleaser/nfpm/v2/Taskfile.yml new file mode 100644 index 0000000000..3fe5916418 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/Taskfile.yml @@ -0,0 +1,135 @@ +# https://taskfile.dev + +version: '3' + +env: + GO111MODULE: on + GOPROXY: https://proxy.golang.org,direct + DOCKER_BUILDKIT: 1 + +vars: + DOCKER: '{{default "docker" .DOCKER}}' + +tasks: + dev: + desc: Setup git hooks + cmds: + - git config core.hooksPath .githooks + + setup: + desc: Install dependencies + cmds: + - go mod tidy + + build: + desc: Build the binary + sources: + - ./**/*.go + generates: + - ./nfpm + cmds: + - go build ./cmd/nfpm + + acceptance:pull: + desc: Pull acceptance test images + vars: + IMGS: + sh: grep FROM ./testdata/acceptance/*.dockerfile | cut -f2 -d' ' | grep -v min | grep -v test | sort | uniq + cmds: + - echo "{{.IMGS}}" | while read -r img; do docker pull $img; done + + acceptance: + desc: Run acceptance tests + env: + LC_ALL: C + vars: + TEST_OPTIONS: '{{default "" .TEST_OPTIONS}}' + TEST_PATTERN: '{{default "." .TEST_PATTERN}}' + cmds: + - go test {{.TEST_OPTIONS}} -tags=acceptance -p 4 -failfast -race -coverpkg=./... -covermode=atomic -coverprofile=coverage.txt acceptance_test.go -run {{.TEST_PATTERN}} -timeout=1h + + test: + desc: Run unit tests + env: + LC_ALL: C + vars: + TEST_OPTIONS: '{{default "" .TEST_OPTIONS}}' + SOURCE_FILES: '{{default "./..." .SOURCE_FILES}}' + TEST_PATTERN: '{{default "."
.TEST_PATTERN}}' + cmds: + - go test {{.TEST_OPTIONS}} -failfast -race -coverpkg=./... -covermode=atomic -coverprofile=coverage.txt {{.SOURCE_FILES}} -run {{.TEST_PATTERN}} -timeout=5m + + cover: + desc: Open the cover tool + cmds: + - go tool cover -html=coverage.txt + + fmt: + desc: gofumpt all code + cmds: + - gofumpt -w -l . + + ci: + desc: Run all CI steps + cmds: + - task: setup + - task: build + - task: test + - task: acceptance + + default: + desc: Runs the default tasks + cmds: + - task: ci + + docs:generate: + desc: Generate docs + cmds: + - ./scripts/cmd_docs.sh + sources: + - cmd/*.go + - files/*.go + - nfpm.go + - ./scripts/cmd_docs.sh + - CONTRIBUTING.md + generates: + - www/docs/cmd/*.md + - www/docs/contributing.md + + docs:releases: + desc: Generate the latest file + cmds: + - ./scripts/pages/releases.sh + generates: + - www/docs/static/latest + + docs:imgs: + desc: Download and resize images + cmds: + - wget -O www/docs/static/logo.png https://github.com/goreleaser/artwork/raw/master/goreleaserfundo.png + - wget -O www/docs/static/card.png "https://og.caarlos0.dev/**GoReleaser**%20%7C%20Deliver%20Go%20binaries%20as%20fast%20and%20easily%20as%20possible.png?theme=light&md=1&fontSize=80px&images=https://github.com/goreleaser.png" + - wget -O www/docs/static/avatar.png https://github.com/goreleaser.png + - convert www/docs/static/avatar.png -define icon:auto-resize=64,48,32,16 www/docs/static/favicon.ico + - convert www/docs/static/avatar.png -resize x120 www/docs/static/apple-touch-icon.png + + docs:serve: + desc: Start documentation server + cmds: + - task: docs:generate + - '{{.DOCKER}} run --rm -it -p 8000:8000 -v ${PWD}/www:/docs docker.io/squidfunk/mkdocs-material' + + docs:build: + desc: Build docs + cmds: + - task: docs:generate + - '{{.DOCKER}} run --rm -v ${PWD}/www:/docs docker.io/squidfunk/mkdocs-material build' + + release: + desc: Create a new tag + vars: + NEXT: + sh: svu n + cmds: + - git tag {{.NEXT}} + - echo {{.NEXT}} + - git push origin --tags diff --git a/vendor/github.com/goreleaser/nfpm/v2/apk/apk.go b/vendor/github.com/goreleaser/nfpm/v2/apk/apk.go new file mode 100644 index 0000000000..9fe97ecb3b --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/apk/apk.go @@ -0,0 +1,604 @@ +/* +Copyright 2019 Torsten Curdt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package apk implements nfpm.Packager providing .apk bindings. 
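+//
+// It registers itself with nfpm under the format name "apk" (see the init
+// function below), so callers can select the packager by that name.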
+package apk + +// Initial implementation from https://gist.github.com/tcurdt/512beaac7e9c12dcf5b6b7603b09d0d8 + +import ( + "archive/tar" + "bufio" + "bytes" + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "io" + "net/mail" + "os" + "path/filepath" + "strings" + "sync/atomic" + "text/template" + "time" + + "github.com/goreleaser/nfpm/v2" + "github.com/goreleaser/nfpm/v2/files" + "github.com/goreleaser/nfpm/v2/internal/sign" + gzip "github.com/klauspost/pgzip" +) + +const packagerName = "apk" + +// nolint: gochecknoinits +func init() { + nfpm.RegisterPackager(packagerName, Default) +} + +// https://wiki.alpinelinux.org/wiki/Architecture +// nolint: gochecknoglobals +var archToAlpine = map[string]string{ + "386": "x86", + "amd64": "x86_64", + "arm6": "armhf", + "arm7": "armv7", + "arm64": "aarch64", + "ppc64le": "ppc64le", + "s390": "s390x", +} + +func ensureValidArch(info *nfpm.Info) *nfpm.Info { + if info.APK.Arch != "" { + info.Arch = info.APK.Arch + } else if arch, ok := archToAlpine[info.Arch]; ok { + info.Arch = arch + } + + return info +} + +// Default apk packager. +// nolint: gochecknoglobals +var Default = &Apk{} + +// Apk is an apk packager implementation. +type Apk struct{} + +func (a *Apk) ConventionalFileName(info *nfpm.Info) string { + info = ensureValidArch(info) + version := info.Version + + if info.Prerelease != "" { + version += "" + info.Prerelease + } + + if info.Release != "" { + version += "-" + info.Release + } + + return fmt.Sprintf("%s_%s_%s.apk", info.Name, version, info.Arch) +} + +// ConventionalExtension returns the file name conventionally used for Apk packages +func (*Apk) ConventionalExtension() string { + return ".apk" +} + +// Package writes a new apk package to the given writer using the given info. 
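+//
+// The output stream is the concatenation of an optional signature tarball,
+// the control tarball and the data tarball, in that order (see combineToApk
+// below).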
+func (*Apk) Package(info *nfpm.Info, apk io.Writer) (err error) { + if info.Platform != "linux" { + return fmt.Errorf("invalid platform: %s", info.Platform) + } + info = ensureValidArch(info) + if err = info.Validate(); err != nil { + return err + } + + var bufData bytes.Buffer + + size := int64(0) + // create the data tgz + dataDigest, err := createData(&bufData, info, &size) + if err != nil { + return err + } + + // create the control tgz + var bufControl bytes.Buffer + controlDigest, err := createControl(&bufControl, info, size, dataDigest) + if err != nil { + return err + } + + if info.APK.Signature.KeyFile == "" { + return combineToApk(apk, &bufControl, &bufData) + } + + // create the signature tgz + var bufSignature bytes.Buffer + if err = createSignature(&bufSignature, info, controlDigest); err != nil { + return err + } + + return combineToApk(apk, &bufSignature, &bufControl, &bufData) +} + +type writerCounter struct { + io.Writer + count uint64 + writer io.Writer +} + +func newWriterCounter(w io.Writer) *writerCounter { + return &writerCounter{ + writer: w, + } +} + +func (counter *writerCounter) Write(buf []byte) (int, error) { + n, err := counter.writer.Write(buf) + atomic.AddUint64(&counter.count, uint64(n)) + return n, err +} + +func (counter *writerCounter) Count() uint64 { + return atomic.LoadUint64(&counter.count) +} + +func writeFile(tw *tar.Writer, header *tar.Header, file io.Reader) error { + header.Format = tar.FormatUSTAR + header.ChangeTime = time.Time{} + header.AccessTime = time.Time{} + + err := tw.WriteHeader(header) + if err != nil { + return err + } + + _, err = io.Copy(tw, file) + return err +} + +type tarKind int + +const ( + tarFull tarKind = iota + tarCut +) + +func writeTgz(w io.Writer, kind tarKind, builder func(tw *tar.Writer) error, digest hash.Hash) ([]byte, error) { + mw := io.MultiWriter(digest, w) + gw := gzip.NewWriter(mw) + cw := newWriterCounter(gw) + bw := bufio.NewWriterSize(cw, 4096) + tw := tar.NewWriter(bw) + + err := builder(tw) + if err != nil { + return nil, err + } + + // handle the cut vs full tars + // TODO: document this better, why do we need to call bw.Flush twice if it is a full tar vs the cut tar? 
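+	// One reading of the flush dance below: tw.Close writes the tar
+	// end-of-archive marker into the buffered writer; only the tarFull case
+	// flushes it through to the counting and gzip writers, while for tarCut
+	// the marker stays in the buffer and is dropped, yielding a "cut" tar.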
+ if err = bw.Flush(); err != nil { + return nil, err + } + if err = tw.Close(); err != nil { + return nil, err + } + if kind == tarFull { + if err = bw.Flush(); err != nil { + return nil, err + } + } + + size := cw.Count() + alignedSize := (size + 511) & ^uint64(511) + + increase := alignedSize - size + if increase > 0 { + b := make([]byte, increase) + _, err = cw.Write(b) + if err != nil { + return nil, err + } + } + + if err = gw.Close(); err != nil { + return nil, err + } + + return digest.Sum(nil), nil +} + +func createData(dataTgz io.Writer, info *nfpm.Info, sizep *int64) ([]byte, error) { + builderData := createBuilderData(info, sizep) + dataDigest, err := writeTgz(dataTgz, tarFull, builderData, sha256.New()) + if err != nil { + return nil, err + } + return dataDigest, nil +} + +func createControl(controlTgz io.Writer, info *nfpm.Info, size int64, dataDigest []byte) ([]byte, error) { + builderControl := createBuilderControl(info, size, dataDigest) + controlDigest, err := writeTgz(controlTgz, tarCut, builderControl, sha1.New()) // nolint:gosec + if err != nil { + return nil, err + } + return controlDigest, nil +} + +func createSignature(signatureTgz io.Writer, info *nfpm.Info, controlSHA1Digest []byte) error { + signatureBuilder := createSignatureBuilder(controlSHA1Digest, info) + // we don't actually need to produce a digest here, but writeTgz + // requires it so we just use SHA1 since it is already imported + _, err := writeTgz(signatureTgz, tarCut, signatureBuilder, sha1.New()) // nolint:gosec + if err != nil { + return &nfpm.ErrSigningFailure{Err: err} + } + + return nil +} + +var errNoKeyAddress = errors.New("key name not set and maintainer mail address empty") + +func createSignatureBuilder(digest []byte, info *nfpm.Info) func(*tar.Writer) error { + return func(tw *tar.Writer) error { + signature, err := sign.RSASignSHA1Digest(digest, + info.APK.Signature.KeyFile, info.APK.Signature.KeyPassphrase) + if err != nil { + return err + } + + // needs to exist on the machine during installation: /etc/apk/keys/.rsa.pub + keyname := info.APK.Signature.KeyName + if keyname == "" { + addr, err := mail.ParseAddress(info.Maintainer) + if err != nil { + return fmt.Errorf("key name not set and unable to parse maintainer mail address: %w", err) + } else if addr.Address == "" { + return errNoKeyAddress + } + + keyname = addr.Address + ".rsa.pub" + } + + // In principle apk supports RSA signatures over SHA256/512 keys, but in + // practice verification works but installation segfaults. If this is + // fixed at some point we should also upgrade the hash. In this case, + // the file name will have to start with .SIGN.RSA256 or .SIGN.RSA512. 
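+		// The resulting entry name looks like ".SIGN.RSA.john@example.com.rsa.pub"
+		// when the key name falls back to the maintainer address (the address
+		// here is illustrative).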
+ signHeader := &tar.Header{ + Name: fmt.Sprintf(".SIGN.RSA.%s", keyname), + Mode: 0o600, + Size: int64(len(signature)), + } + + return writeFile(tw, signHeader, bytes.NewReader(signature)) + } +} + +func combineToApk(target io.Writer, readers ...io.Reader) error { + for _, tgz := range readers { + if _, err := io.Copy(target, tgz); err != nil { + return err + } + } + return nil +} + +func createBuilderControl(info *nfpm.Info, size int64, dataDigest []byte) func(tw *tar.Writer) error { + return func(tw *tar.Writer) error { + var infoBuf bytes.Buffer + if err := writeControl(&infoBuf, controlData{ + Info: info, + InstalledSize: size, + Datahash: hex.EncodeToString(dataDigest), + }); err != nil { + return err + } + infoContent := infoBuf.String() + + infoHeader := &tar.Header{ + Name: ".PKGINFO", + Mode: 0o600, + Size: int64(len(infoContent)), + } + + if err := writeFile(tw, infoHeader, strings.NewReader(infoContent)); err != nil { + return err + } + + // NOTE: Apk scripts tend to follow the pattern: + // #!/bin/sh + // + // bin/echo 'running preinstall.sh' // do stuff here + // + // exit 0 + for script, dest := range map[string]string{ + info.Scripts.PreInstall: ".pre-install", + info.APK.Scripts.PreUpgrade: ".pre-upgrade", + info.Scripts.PostInstall: ".post-install", + info.APK.Scripts.PostUpgrade: ".post-upgrade", + info.Scripts.PreRemove: ".pre-deinstall", + info.Scripts.PostRemove: ".post-deinstall", + } { + if script != "" { + if err := newScriptInsideTarGz(tw, script, dest); err != nil { + return err + } + } + } + + return nil + } +} + +func newScriptInsideTarGz(out *tar.Writer, path, dest string) error { + file, err := os.Stat(path) //nolint:gosec + if err != nil { + return err + } + content, err := os.ReadFile(path) + if err != nil { + return err + } + return newItemInsideTarGz(out, content, &tar.Header{ + Name: files.ToNixPath(dest), + Size: int64(len(content)), + Mode: 0o755, + ModTime: file.ModTime(), + Typeflag: tar.TypeReg, + }) +} + +func newItemInsideTarGz(out *tar.Writer, content []byte, header *tar.Header) error { + header.Format = tar.FormatPAX + header.PAXRecords = make(map[string]string) + + hasher := sha1.New() + _, err := hasher.Write(content) + if err != nil { + return fmt.Errorf("failed to hash content of file %s: %w", header.Name, err) + } + header.PAXRecords["APK-TOOLS.checksum.SHA1"] = fmt.Sprintf("%x", hasher.Sum(nil)) + if err := out.WriteHeader(header); err != nil { + return fmt.Errorf("cannot write header of %s file to apk: %w", header.Name, err) + } + if _, err := out.Write(content); err != nil { + return fmt.Errorf("cannot write %s file to apk: %w", header.Name, err) + } + return nil +} + +func createBuilderData(info *nfpm.Info, sizep *int64) func(tw *tar.Writer) error { + created := map[string]bool{} + + return func(tw *tar.Writer) error { + return createFilesInsideTarGz(info, tw, created, sizep) + } +} + +func createFilesInsideTarGz(info *nfpm.Info, tw *tar.Writer, created map[string]bool, sizep *int64) (err error) { + // create explicit directories first + for _, file := range info.Contents { + // at this point, we don't care about other types yet + if file.Type != "dir" { + continue + } + + // only consider contents for this packager + if file.Packager != "" && file.Packager != packagerName { + continue + } + + if err := createTree(tw, file.Destination, created); err != nil { + return err + } + + normalizedName := normalizePath(strings.Trim(file.Destination, "/")) + "/" + + if created[normalizedName] { + return fmt.Errorf("duplicate directory: %q", 
normalizedName) + } + + err = tw.WriteHeader(&tar.Header{ + Name: normalizedName, + Mode: int64(file.FileInfo.Mode), + Typeflag: tar.TypeDir, + Format: tar.FormatGNU, + Uname: file.FileInfo.Owner, + Gname: file.FileInfo.Group, + ModTime: file.FileInfo.MTime, + }) + if err != nil { + return err + } + + created[normalizedName] = true + } + + for _, file := range info.Contents { + // only consider contents for this packager + if file.Packager != "" && file.Packager != packagerName { + continue + } + + // create implicit directory structure below the current content + if err = createTree(tw, file.Destination, created); err != nil { + return err + } + + switch file.Type { + case "ghost": + // skip ghost files in apk + continue + case "dir": + // already handled above + continue + case "symlink": + err = createSymlinkInsideTarGz(file, tw) + default: + err = copyToTarAndDigest(file, tw, sizep) + } + if err != nil { + return err + } + } + + return nil +} + +func createSymlinkInsideTarGz(file *files.Content, out *tar.Writer) error { + return newItemInsideTarGz(out, []byte{}, &tar.Header{ + Name: strings.TrimLeft(file.Destination, "/"), + Linkname: file.Source, + Typeflag: tar.TypeSymlink, + ModTime: file.FileInfo.MTime, + }) +} + +func copyToTarAndDigest(file *files.Content, tw *tar.Writer, sizep *int64) error { + contents, err := os.ReadFile(file.Source) + if err != nil { + return err + } + header, err := tar.FileInfoHeader(file, file.Source) + if err != nil { + return err + } + + // tar.FileInfoHeader only uses file.Mode().Perm() which masks the mode with + // 0o777 which we don't want because we want to be able to set the suid bit. + header.Mode = int64(file.Mode()) + header.Name = normalizePath(file.Destination) + header.Uname = file.FileInfo.Owner + header.Gname = file.FileInfo.Group + if err = newItemInsideTarGz(tw, contents, header); err != nil { + return err + } + + *sizep += file.Size() + return nil +} + +// normalizePath returns a path separated by slashes without a leading slash. +func normalizePath(src string) string { + return files.ToNixPath(strings.TrimLeft(src, "/")) +} + +// this is needed because the data.tar.gz file should have the empty folders +// as well, so we walk through the dst and create all subfolders. +func createTree(tarw *tar.Writer, dst string, created map[string]bool) error { + for _, path := range pathsToCreate(dst) { + path = normalizePath(path) + "/" + + if created[path] { + // skipping dir that was previously created inside the archive + // (eg: usr/) + continue + } + + if err := tarw.WriteHeader(&tar.Header{ + Name: path, + Mode: 0o755, + Typeflag: tar.TypeDir, + Format: tar.FormatGNU, + Uname: "root", + Gname: "root", + }); err != nil { + return fmt.Errorf("failed to create folder %s: %w", path, err) + } + created[path] = true + } + return nil +} + +func pathsToCreate(dst string) []string { + paths := []string{} + base := strings.Trim(dst, "/") + for { + base = filepath.Dir(base) + if base == "." { + break + } + paths = append(paths, files.ToNixPath(base)) + } + // we don't really need to create those things in order apparently, but, + // it looks really weird if we don't. 
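+	// e.g. dst "/usr/share/doc/nfpm" yields ["usr", "usr/share",
+	// "usr/share/doc"], parents first.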
+ result := []string{} + for i := len(paths) - 1; i >= 0; i-- { + result = append(result, paths[i]) + } + return result +} + +// reference: https://wiki.adelielinux.org/wiki/APK_internals#.PKGINFO +const controlTemplate = ` +{{- /* Mandatory fields */ -}} +pkgname = {{.Info.Name}} +pkgver = {{.Info.Version}} + {{- if .Info.Prerelease}}{{ .Info.Prerelease }}{{- end }} + {{- if .Info.Release}}-{{ .Info.Release }}{{- end }} +arch = {{.Info.Arch}} +size = {{.InstalledSize}} +pkgdesc = {{multiline .Info.Description}} +{{- if .Info.Homepage}} +url = {{.Info.Homepage}} +{{- end }} +{{- if .Info.Maintainer}} +maintainer = {{.Info.Maintainer}} +{{- end }} +{{- range $repl := .Info.Replaces}} +replaces = {{ $repl }} +{{- end }} +{{- range $prov := .Info.Provides}} +provides = {{ $prov }} +{{- end }} +{{- range $dep := .Info.Depends}} +depend = {{ $dep }} +{{- end }} +{{- if .Info.License}} +license = {{.Info.License}} +{{- end }} +datahash = {{.Datahash}} +` + +type controlData struct { + Info *nfpm.Info + InstalledSize int64 + Datahash string +} + +func writeControl(w io.Writer, data controlData) error { + tmpl := template.New("control") + tmpl.Funcs(template.FuncMap{ + "multiline": func(strs string) string { + ret := strings.ReplaceAll(strs, "\n", "\n ") + return strings.Trim(ret, " \n") + }, + }) + return template.Must(tmpl.Parse(controlTemplate)).Execute(w, data) +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/arch/arch.go b/vendor/github.com/goreleaser/nfpm/v2/arch/arch.go new file mode 100644 index 0000000000..0f909fee33 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/arch/arch.go @@ -0,0 +1,757 @@ +// Package arch implements nfpm.Packager providing bindings for Arch Linux packages. +package arch + +import ( + "archive/tar" + "bytes" + "crypto/md5" + "crypto/sha256" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/goreleaser/nfpm/v2" + "github.com/goreleaser/nfpm/v2/files" + "github.com/klauspost/compress/zstd" + "github.com/klauspost/pgzip" +) + +var ErrInvalidPkgName = errors.New("archlinux: package names may only contain alphanumeric characters or one of ., _, +, or -, and may not start with hyphen or dot") + +const packagerName = "archlinux" + +// nolint: gochecknoinits +func init() { + nfpm.RegisterPackager(packagerName, Default) +} + +// Default ArchLinux packager. +// nolint: gochecknoglobals +var Default = ArchLinux{} + +type ArchLinux struct{} + +// nolint: gochecknoglobals +var archToArchLinux = map[string]string{ + "all": "any", + "amd64": "x86_64", + "386": "i686", + "arm64": "aarch64", + "arm7": "armv7h", + "arm6": "armv6h", + "arm5": "arm", +} + +func ensureValidArch(info *nfpm.Info) *nfpm.Info { + if info.ArchLinux.Arch != "" { + info.Arch = info.ArchLinux.Arch + } else if arch, ok := archToArchLinux[info.Arch]; ok { + info.Arch = arch + } + + return info +} + +// ConventionalFileName returns a file name for a package conforming +// to Arch Linux package naming guidelines. 
See: +// https://wiki.archlinux.org/title/Arch_package_guidelines#Package_naming +func (ArchLinux) ConventionalFileName(info *nfpm.Info) string { + info = ensureValidArch(info) + + pkgrel, err := strconv.Atoi(info.Release) + if err != nil { + pkgrel = 1 + } + + name := fmt.Sprintf( + "%s-%s-%d-%s.pkg.tar.zst", + info.Name, + info.Version+strings.ReplaceAll(info.Prerelease, "-", "_"), + pkgrel, + info.Arch, + ) + + return validPkgName(name) +} + +// validPkgName removes any invalid characters from a string +func validPkgName(s string) string { + s = strings.Map(mapValidChar, s) + s = strings.TrimLeft(s, "-.") + return s +} + +// nameIsValid checks whether a package name is valid +func nameIsValid(s string) bool { + return s != "" && s == validPkgName(s) +} + +// mapValidChar returns r if it is allowed, otherwise, returns -1 +func mapValidChar(r rune) rune { + if r >= 'a' && r <= 'z' || + r >= 'A' && r <= 'Z' || + r >= '0' && r <= '9' || + isOneOf(r, '.', '_', '+', '-') { + return r + } + return -1 +} + +// isOneOf checks whether a rune is one of the runes in rr +func isOneOf(r rune, rr ...rune) bool { + for _, char := range rr { + if r == char { + return true + } + } + return false +} + +// Package writes a new archlinux package to the given writer using the given info. +func (ArchLinux) Package(info *nfpm.Info, w io.Writer) error { + if info.Platform != "linux" { + return fmt.Errorf("invalid platform: %s", info.Platform) + } + info = ensureValidArch(info) + if err := info.Validate(); err != nil { + return err + } + + if !nameIsValid(info.Name) { + return ErrInvalidPkgName + } + + zw, err := zstd.NewWriter(w) + if err != nil { + return err + } + defer zw.Close() + + tw := tar.NewWriter(zw) + defer tw.Close() + + entries, totalSize, err := createFilesInTar(info, tw) + if err != nil { + return err + } + + pkginfoEntry, err := createPkginfo(info, tw, totalSize) + if err != nil { + return err + } + + // .PKGINFO must be the first entry in .MTREE + entries = append([]MtreeEntry{*pkginfoEntry}, entries...) 
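+	// The per-file sizes and digests collected above feed the .MTREE listing,
+	// which createMtree writes as a gzip-compressed archive member.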
+ + err = createMtree(info, tw, entries) + if err != nil { + return err + } + + return createScripts(info, tw) +} + +// ConventionalExtension returns the file name conventionally used for Arch Linux packages +func (ArchLinux) ConventionalExtension() string { + return ".pkg.tar.zst" +} + +// createFilesInTar adds the files described in the given info to the given tar writer +func createFilesInTar(info *nfpm.Info, tw *tar.Writer) ([]MtreeEntry, int64, error) { + created := map[string]struct{}{} + var entries []MtreeEntry + var totalSize int64 + + var contents []*files.Content + + for _, content := range info.Contents { + if content.Packager != "" && content.Packager != packagerName { + continue + } + + switch content.Type { + case "dir", "symlink": + contents = append(contents, content) + continue + } + + fi, err := os.Stat(content.Source) + if err != nil { + return nil, 0, err + } + + if fi.IsDir() { + err = filepath.WalkDir(content.Source, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + relPath := strings.TrimPrefix(path, content.Source) + + if d.IsDir() { + return nil + } + + c := &files.Content{ + Source: path, + Destination: filepath.Join(content.Destination, relPath), + FileInfo: &files.ContentFileInfo{ + Mode: d.Type(), + }, + } + + if d.Type()&os.ModeSymlink != 0 { + c.Type = "symlink" + } + + contents = append(contents, c) + return nil + }) + if err != nil { + return nil, 0, err + } + } else { + contents = append(contents, content) + } + } + + for _, content := range contents { + path := normalizePath(content.Destination) + + switch content.Type { + case "ghost": + // Ignore ghost files + case "dir": + err := createDirs(content.Destination, tw, created) + if err != nil { + return nil, 0, err + } + + modtime := time.Now() + // If the time is given, use it + if content.FileInfo != nil && !content.ModTime().IsZero() { + modtime = content.ModTime() + } + + entries = append(entries, MtreeEntry{ + Destination: path, + Time: modtime.Unix(), + Type: content.Type, + }) + case "symlink": + dir := filepath.Dir(path) + err := createDirs(dir, tw, created) + if err != nil { + return nil, 0, err + } + + modtime := time.Now() + // If the time is given, use it + if content.FileInfo != nil && !content.ModTime().IsZero() { + modtime = content.ModTime() + } + + err = tw.WriteHeader(&tar.Header{ + Name: normalizePath(content.Destination), + Linkname: content.Source, + ModTime: modtime, + Typeflag: tar.TypeSymlink, + }) + if err != nil { + return nil, 0, err + } + + entries = append(entries, MtreeEntry{ + LinkSource: content.Source, + Destination: path, + Time: modtime.Unix(), + Mode: 0o777, + Type: content.Type, + }) + default: + dir := filepath.Dir(path) + err := createDirs(dir, tw, created) + if err != nil { + return nil, 0, err + } + + src, err := os.Open(content.Source) + if err != nil { + return nil, 0, err + } + + srcFi, err := src.Stat() + if err != nil { + return nil, 0, err + } + + header := &tar.Header{ + Name: path, + Mode: int64(srcFi.Mode()), + Typeflag: tar.TypeReg, + Size: srcFi.Size(), + ModTime: srcFi.ModTime(), + } + + if content.FileInfo != nil && content.Mode() != 0 { + header.Mode = int64(content.Mode()) + } + + if content.FileInfo != nil && !content.ModTime().IsZero() { + header.ModTime = content.ModTime() + } + + if content.FileInfo != nil && content.Size() != 0 { + header.Size = content.Size() + } + + err = tw.WriteHeader(header) + if err != nil { + return nil, 0, err + } + + sha256Hash := sha256.New() + md5Hash := md5.New() + + w := 
io.MultiWriter(tw, sha256Hash, md5Hash) + + _, err = io.Copy(w, src) + if err != nil { + return nil, 0, err + } + + entries = append(entries, MtreeEntry{ + Destination: path, + Time: srcFi.ModTime().Unix(), + Mode: int64(srcFi.Mode()), + Size: srcFi.Size(), + Type: content.Type, + MD5: md5Hash.Sum(nil), + SHA256: sha256Hash.Sum(nil), + }) + + totalSize += srcFi.Size() + } + } + + return entries, totalSize, nil +} + +func createDirs(dst string, tw *tar.Writer, created map[string]struct{}) error { + for _, path := range neededPaths(dst) { + path = normalizePath(path) + "/" + + if _, ok := created[path]; ok { + continue + } + + err := tw.WriteHeader(&tar.Header{ + Name: path, + Mode: 0o755, + Typeflag: tar.TypeDir, + ModTime: time.Now(), + Uname: "root", + Gname: "root", + }) + if err != nil { + return fmt.Errorf("failed to create folder: %w", err) + } + + created[path] = struct{}{} + } + + return nil +} + +func defaultStr(s, def string) string { + if s == "" { + return def + } + return s +} + +func neededPaths(dst string) []string { + dst = files.ToNixPath(dst) + split := strings.Split(strings.Trim(dst, "/."), "/") + + var sb strings.Builder + var paths []string + for index, elem := range split { + if index != 0 { + sb.WriteRune('/') + } + sb.WriteString(elem) + paths = append(paths, sb.String()) + } + + return paths +} + +func normalizePath(src string) string { + return files.ToNixPath(strings.TrimPrefix(src, "/")) +} + +func createPkginfo(info *nfpm.Info, tw *tar.Writer, totalSize int64) (*MtreeEntry, error) { + if !nameIsValid(info.Name) { + return nil, ErrInvalidPkgName + } + + buf := &bytes.Buffer{} + + info = ensureValidArch(info) + + pkgrel, err := strconv.Atoi(info.Release) + if err != nil { + pkgrel = 1 + } + + pkgver := fmt.Sprintf("%s-%d", info.Version, pkgrel) + if info.Epoch != "" { + epoch, err := strconv.ParseUint(info.Epoch, 10, 64) + if err == nil { + pkgver = fmt.Sprintf( + "%d:%s%s-%d", + epoch, + info.Version, + strings.ReplaceAll(info.Prerelease, "-", "_"), + pkgrel, + ) + } + } + + // Description cannot contain newlines + pkgdesc := strings.ReplaceAll(info.Description, "\n", " ") + + _, err = io.WriteString(buf, "# Generated by nfpm\n") + if err != nil { + return nil, err + } + + builddate := strconv.FormatInt(time.Now().Unix(), 10) + totalSizeStr := strconv.FormatInt(totalSize, 10) + + err = writeKVPairs(buf, map[string]string{ + "size": totalSizeStr, + "pkgname": info.Name, + "pkgbase": defaultStr(info.ArchLinux.Pkgbase, info.Name), + "pkgver": pkgver, + "pkgdesc": pkgdesc, + "url": info.Homepage, + "builddate": builddate, + "packager": defaultStr(info.ArchLinux.Packager, "Unknown Packager"), + "arch": info.Arch, + "license": info.License, + }) + if err != nil { + return nil, err + } + + for _, replaces := range info.Replaces { + err = writeKVPair(buf, "replaces", replaces) + if err != nil { + return nil, err + } + } + + for _, conflict := range info.Conflicts { + err = writeKVPair(buf, "conflict", conflict) + if err != nil { + return nil, err + } + } + + for _, provides := range info.Provides { + err = writeKVPair(buf, "provides", provides) + if err != nil { + return nil, err + } + } + + for _, depend := range info.Depends { + err = writeKVPair(buf, "depend", depend) + if err != nil { + return nil, err + } + } + + for _, content := range info.Contents { + if content.Type == "config" || content.Type == "config|noreplace" { + path := normalizePath(content.Destination) + path = strings.TrimPrefix(path, "./") + + err = writeKVPair(buf, "backup", path) + if err != nil { + 
return nil, err + } + } + } + + size := buf.Len() + + err = tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Mode: 0o644, + Name: ".PKGINFO", + Size: int64(size), + ModTime: time.Now(), + }) + if err != nil { + return nil, err + } + + md5Hash := md5.New() + sha256Hash := sha256.New() + + r := io.TeeReader(buf, md5Hash) + r = io.TeeReader(r, sha256Hash) + + _, err = io.Copy(tw, r) + if err != nil { + return nil, err + } + + return &MtreeEntry{ + Destination: ".PKGINFO", + Time: time.Now().Unix(), + Mode: 0o644, + Size: int64(size), + Type: "file", + MD5: md5Hash.Sum(nil), + SHA256: sha256Hash.Sum(nil), + }, nil +} + +func writeKVPairs(w io.Writer, s map[string]string) error { + for key, val := range s { + err := writeKVPair(w, key, val) + if err != nil { + return err + } + } + return nil +} + +func writeKVPair(w io.Writer, key, value string) error { + if value == "" { + return nil + } + + _, err := io.WriteString(w, key) + if err != nil { + return err + } + + _, err = io.WriteString(w, " = ") + if err != nil { + return err + } + + _, err = io.WriteString(w, value) + if err != nil { + return err + } + + _, err = io.WriteString(w, "\n") + return err +} + +type MtreeEntry struct { + LinkSource string + Destination string + Time int64 + Mode int64 + Size int64 + Type string + MD5 []byte + SHA256 []byte +} + +func (me *MtreeEntry) WriteTo(w io.Writer) (int64, error) { + switch me.Type { + case "dir": + n, err := fmt.Fprintf( + w, + "./%s time=%d.0 type=dir\n", + normalizePath(me.Destination), + me.Time, + ) + return int64(n), err + case "symlink": + n, err := fmt.Fprintf( + w, + "./%s time=%d.0 mode=%o type=link link=%s\n", + normalizePath(me.Destination), + me.Time, + me.Mode, + me.LinkSource, + ) + return int64(n), err + default: + n, err := fmt.Fprintf( + w, + "./%s time=%d.0 mode=%o size=%d type=file md5digest=%x sha256digest=%x\n", + normalizePath(me.Destination), + me.Time, + me.Mode, + me.Size, + me.MD5, + me.SHA256, + ) + return int64(n), err + } +} + +func createMtree(info *nfpm.Info, tw *tar.Writer, entries []MtreeEntry) error { + buf := &bytes.Buffer{} + gw := pgzip.NewWriter(buf) + defer gw.Close() + + created := map[string]struct{}{} + + _, err := io.WriteString(gw, "#mtree\n") + if err != nil { + return err + } + + for _, entry := range entries { + destDir := filepath.Dir(entry.Destination) + + dirs := createDirsMtree(destDir, created) + for _, dir := range dirs { + _, err = dir.WriteTo(gw) + if err != nil { + return err + } + } + + _, err = entry.WriteTo(gw) + if err != nil { + return err + } + } + + gw.Close() + + err = tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Mode: 0o644, + Name: ".MTREE", + Size: int64(buf.Len()), + ModTime: time.Now(), + }) + if err != nil { + return err + } + + _, err = io.Copy(tw, buf) + return err +} + +func createDirsMtree(dst string, created map[string]struct{}) []MtreeEntry { + var out []MtreeEntry + for _, path := range neededPaths(dst) { + path = normalizePath(path) + "/" + + if path == "./" { + continue + } + + if _, ok := created[path]; ok { + continue + } + + out = append(out, MtreeEntry{ + Destination: path, + Time: time.Now().Unix(), + Mode: 0o755, + Type: "dir", + }) + + created[path] = struct{}{} + } + return out +} + +func createScripts(info *nfpm.Info, tw *tar.Writer) error { + scripts := map[string]string{} + + if info.Scripts.PreInstall != "" { + scripts["pre_install"] = info.Scripts.PreInstall + } + + if info.Scripts.PostInstall != "" { + scripts["post_install"] = info.Scripts.PostInstall + } + + if 
info.Scripts.PreRemove != "" { + scripts["pre_remove"] = info.Scripts.PreRemove + } + + if info.Scripts.PostRemove != "" { + scripts["post_remove"] = info.Scripts.PostRemove + } + + if info.ArchLinux.Scripts.PreUpgrade != "" { + scripts["pre_upgrade"] = info.ArchLinux.Scripts.PreUpgrade + } + + if info.ArchLinux.Scripts.PostUpgrade != "" { + scripts["post_upgrade"] = info.ArchLinux.Scripts.PostUpgrade + } + + if len(scripts) == 0 { + return nil + } + + buf := &bytes.Buffer{} + + err := writeScripts(buf, scripts) + if err != nil { + return err + } + + err = tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Mode: 0o644, + Name: ".INSTALL", + Size: int64(buf.Len()), + ModTime: time.Now(), + }) + if err != nil { + return err + } + + _, err = io.Copy(tw, buf) + return err +} + +func writeScripts(w io.Writer, scripts map[string]string) error { + for script, path := range scripts { + fmt.Fprintf(w, "function %s() {\n", script) + + fl, err := os.Open(path) + if err != nil { + return err + } + + _, err = io.Copy(w, fl) + if err != nil { + return err + } + + fl.Close() + + _, err = io.WriteString(w, "\n}\n\n") + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/cmd/nfpm/main.go b/vendor/github.com/goreleaser/nfpm/v2/cmd/nfpm/main.go new file mode 100644 index 0000000000..29b2a77d6c --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/cmd/nfpm/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "fmt" + "os" + "runtime/debug" + + "github.com/goreleaser/nfpm/v2/internal/cmd" +) + +// nolint: gochecknoglobals +var ( + version = "dev" + commit = "" + date = "" + builtBy = "" +) + +func main() { + cmd.Execute( + buildVersion(version, commit, date, builtBy), + os.Exit, + os.Args[1:], + ) +} + +func buildVersion(version, commit, date, builtBy string) string { + result := "nfpm version " + version + if commit != "" { + result = fmt.Sprintf("%s\ncommit: %s", result, commit) + } + if date != "" { + result = fmt.Sprintf("%s\nbuilt at: %s", result, date) + } + if builtBy != "" { + result = fmt.Sprintf("%s\nbuilt by: %s", result, builtBy) + } + if info, ok := debug.ReadBuildInfo(); ok && info.Main.Sum != "" { + result = fmt.Sprintf("%s\nmodule version: %s, checksum: %s", result, info.Main.Version, info.Main.Sum) + } + return result +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/deb/deb.go b/vendor/github.com/goreleaser/nfpm/v2/deb/deb.go new file mode 100644 index 0000000000..cf7526b72b --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/deb/deb.go @@ -0,0 +1,856 @@ +// Package deb implements nfpm.Packager providing .deb bindings. 
+package deb + +import ( + "archive/tar" + "bytes" + "crypto/md5" // nolint:gas + "crypto/sha1" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "text/template" + "time" + + "github.com/blakesmith/ar" + "github.com/goreleaser/chglog" + "github.com/goreleaser/nfpm/v2" + "github.com/goreleaser/nfpm/v2/deprecation" + "github.com/goreleaser/nfpm/v2/files" + "github.com/goreleaser/nfpm/v2/internal/sign" + "github.com/klauspost/compress/zstd" + gzip "github.com/klauspost/pgzip" + "github.com/ulikunitz/xz" +) + +const packagerName = "deb" + +// nolint: gochecknoinits +func init() { + nfpm.RegisterPackager(packagerName, Default) +} + +// nolint: gochecknoglobals +var archToDebian = map[string]string{ + "386": "i386", + "arm5": "armel", + "arm6": "armhf", + "arm7": "armhf", + "mipsle": "mipsel", + "mips64le": "mips64el", + "ppc64le": "ppc64el", + "s390": "s390x", +} + +func ensureValidArch(info *nfpm.Info) *nfpm.Info { + if info.Deb.Arch != "" { + info.Arch = info.Deb.Arch + } else if arch, ok := archToDebian[info.Arch]; ok { + info.Arch = arch + } + + return info +} + +// Default deb packager. +// nolint: gochecknoglobals +var Default = &Deb{} + +// Deb is a deb packager implementation. +type Deb struct{} + +// ConventionalFileName returns a file name according +// to the conventions for debian packages. See: +// https://manpages.debian.org/buster/dpkg-dev/dpkg-name.1.en.html +func (*Deb) ConventionalFileName(info *nfpm.Info) string { + info = ensureValidArch(info) + + version := info.Version + if info.Prerelease != "" { + version += "~" + info.Prerelease + } + + if info.VersionMetadata != "" { + version += "+" + info.VersionMetadata + } + + if info.Release != "" { + version += "-" + info.Release + } + + // package_version_architecture.package-type + return fmt.Sprintf("%s_%s_%s.deb", info.Name, version, info.Arch) +} + +// ConventionalExtension returns the file name conventionally used for Deb packages +func (*Deb) ConventionalExtension() string { + return ".deb" +} + +// ErrInvalidSignatureType happens if the signature type of a deb is not one of +// origin, maint or archive. +var ErrInvalidSignatureType = errors.New("invalid signature type") + +// Package writes a new deb package to the given writer using the given info. 
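+// The produced archive is a plain Unix ar file whose members appear in a fixed order: "debian-binary" (the literal version string "2.0\n"), then "control.tar.gz" (control file, md5sums, conffiles and any maintainer scripts), then the data tarball ("data.tar.gz", ".xz", ".zst" or plain ".tar" depending on the configured compression) and, when a signing key is configured, a "_gpg<type>" member carrying the signature.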
+func (d *Deb) Package(info *nfpm.Info, deb io.Writer) (err error) { // nolint: funlen + info = ensureValidArch(info) + if err = info.Validate(); err != nil { + return err + } + + // Set up some deb specific defaults + d.SetPackagerDefaults(info) + + dataTarball, md5sums, instSize, dataTarballName, err := createDataTarball(info) + if err != nil { + return err + } + + controlTarGz, err := createControl(instSize, md5sums, info) + if err != nil { + return err + } + + debianBinary := []byte("2.0\n") + + w := ar.NewWriter(deb) + if err := w.WriteGlobalHeader(); err != nil { + return fmt.Errorf("cannot write ar header to deb file: %w", err) + } + + if err := addArFile(w, "debian-binary", debianBinary); err != nil { + return fmt.Errorf("cannot pack debian-binary: %w", err) + } + + if err := addArFile(w, "control.tar.gz", controlTarGz); err != nil { + return fmt.Errorf("cannot add control.tar.gz to deb: %w", err) + } + + if err := addArFile(w, dataTarballName, dataTarball); err != nil { + return fmt.Errorf("cannot add data.tar.gz to deb: %w", err) + } + + if info.Deb.Signature.KeyFile != "" { + sig, sigType, err := doSign(info, debianBinary, controlTarGz, dataTarball) + if err != nil { + return err + } + + if err := addArFile(w, "_gpg"+sigType, sig); err != nil { + return &nfpm.ErrSigningFailure{ + Err: fmt.Errorf("add signature to ar file: %w", err), + } + } + } + + return nil +} + +func doSign(info *nfpm.Info, debianBinary, controlTarGz, dataTarball []byte) ([]byte, string, error) { + switch info.Deb.Signature.Method { + case "dpkg-sig": + return dpkgSign(info, debianBinary, controlTarGz, dataTarball) + default: + return debSign(info, debianBinary, controlTarGz, dataTarball) + } +} + +func dpkgSign(info *nfpm.Info, debianBinary, controlTarGz, dataTarball []byte) ([]byte, string, error) { + sigType := "builder" + if info.Deb.Signature.Type != "" { + sigType = info.Deb.Signature.Type + } + + data, err := readDpkgSigData(info, debianBinary, controlTarGz, dataTarball) + if err != nil { + return nil, sigType, &nfpm.ErrSigningFailure{Err: err} + } + + sig, err := sign.PGPClearSignWithKeyID(data, info.Deb.Signature.KeyFile, info.Deb.Signature.KeyPassphrase, info.Deb.Signature.KeyID) + if err != nil { + return nil, sigType, &nfpm.ErrSigningFailure{Err: err} + } + return sig, sigType, nil +} + +func debSign(info *nfpm.Info, debianBinary, controlTarGz, dataTarball []byte) ([]byte, string, error) { + data := readDebsignData(debianBinary, controlTarGz, dataTarball) + + sigType := "origin" + if info.Deb.Signature.Type != "" { + sigType = info.Deb.Signature.Type + } + + if sigType != "origin" && sigType != "maint" && sigType != "archive" { + return nil, sigType, &nfpm.ErrSigningFailure{ + Err: ErrInvalidSignatureType, + } + } + + sig, err := sign.PGPArmoredDetachSignWithKeyID(data, info.Deb.Signature.KeyFile, info.Deb.Signature.KeyPassphrase, info.Deb.Signature.KeyID) + if err != nil { + return nil, sigType, &nfpm.ErrSigningFailure{Err: err} + } + return sig, sigType, nil +} + +func readDebsignData(debianBinary, controlTarGz, dataTarball []byte) io.Reader { + return io.MultiReader(bytes.NewReader(debianBinary), bytes.NewReader(controlTarGz), + bytes.NewReader(dataTarball)) +} + +// reference: https://manpages.debian.org/jessie/dpkg-sig/dpkg-sig.1.en.html +const dpkgSigTemplate = ` +Hash: SHA1 + +Version: 4 +Signer: {{ .Signer }} +Date: {{ .Date }} +Role: {{ .Role }} +Files: +{{range .Files}}{{ .Md5Sum }} {{ .Sha1Sum }} {{ .Size }} {{ .Name }}{{end}} +` + +type dpkgSigData struct { + Signer string + Date 
time.Time + Role string + Files []dpkgSigFileLine + Info *nfpm.Info +} +type dpkgSigFileLine struct { + Md5Sum [16]byte + Sha1Sum [20]byte + Size int + Name string +} + +func newDpkgSigFileLine(name string, fileContent []byte) dpkgSigFileLine { + return dpkgSigFileLine{ + Name: name, + Md5Sum: md5.Sum(fileContent), + Sha1Sum: sha1.Sum(fileContent), + Size: len(fileContent), + } +} + +func readDpkgSigData(info *nfpm.Info, debianBinary, controlTarGz, dataTarball []byte) (io.Reader, error) { + data := dpkgSigData{ + Signer: info.Deb.Signature.Signer, + Date: time.Now(), + Role: info.Deb.Signature.Type, + Files: []dpkgSigFileLine{ + newDpkgSigFileLine("debian-binary", debianBinary), + newDpkgSigFileLine("control.tar.gz", controlTarGz), + newDpkgSigFileLine("data.tar.gz", dataTarball), + }, + } + temp, _ := template.New("dpkg-sig").Parse(dpkgSigTemplate) + buf := &bytes.Buffer{} + err := temp.Execute(buf, data) + if err != nil { + return nil, fmt.Errorf("dpkg-sig template error: %w", err) + } + return buf, nil +} + +func (*Deb) SetPackagerDefaults(info *nfpm.Info) { + // Priority should be set on all packages per: + // https://www.debian.org/doc/debian-policy/ch-archive.html#priorities + // "optional" seems to be the safe/sane default here + if info.Priority == "" { + info.Priority = "optional" + } + + // The safe thing here feels like defaulting to something like below. + // That will prevent existing configs from breaking anyway... Wondering + // if in the long run we should be more strict about this and error when + // not set? + if info.Maintainer == "" { + deprecation.Println("Leaving the 'maintainer' field unset will not be allowed in a future version") + info.Maintainer = "Unset Maintainer <unset@localhost>" + } +} + +func addArFile(w *ar.Writer, name string, body []byte) error { + header := ar.Header{ + Name: files.ToNixPath(name), + Size: int64(len(body)), + Mode: 0o644, + ModTime: time.Now(), + } + if err := w.WriteHeader(&header); err != nil { + return fmt.Errorf("cannot write file header: %w", err) + } + _, err := w.Write(body) + return err +} + +type nopCloser struct { + io.Writer +} + +func (nopCloser) Close() error { return nil } + +func createDataTarball(info *nfpm.Info) (dataTarBall, md5sums []byte, + instSize int64, name string, err error, +) { + var ( + dataTarball bytes.Buffer + dataTarballWriteCloser io.WriteCloser + ) + + switch info.Deb.Compression { + case "", "gzip": // the default for now + dataTarballWriteCloser = gzip.NewWriter(&dataTarball) + name = "data.tar.gz" + case "xz": + dataTarballWriteCloser, err = xz.NewWriter(&dataTarball) + if err != nil { + return nil, nil, 0, "", err + } + name = "data.tar.xz" + case "zstd": + dataTarballWriteCloser, err = zstd.NewWriter(&dataTarball) + if err != nil { + return nil, nil, 0, "", err + } + name = "data.tar.zst" + case "none": + dataTarballWriteCloser = nopCloser{Writer: &dataTarball} + name = "data.tar" + default: + return nil, nil, 0, "", fmt.Errorf("unknown compression algorithm: %s", info.Deb.Compression) + } + + // the writer is properly closed later, this is just in case that we error out + defer dataTarballWriteCloser.Close() // nolint: errcheck + + md5sums, instSize, err = fillDataTar(info, dataTarballWriteCloser) + if err != nil { + return nil, nil, 0, "", err + } + + if err := dataTarballWriteCloser.Close(); err != nil { + return nil, nil, 0, "", fmt.Errorf("closing data tarball: %w", err) + } + + return dataTarball.Bytes(), md5sums, instSize, name, nil +} + +func fillDataTar(info *nfpm.Info, w io.Writer) (md5sums []byte,
instSize int64, err error) { + out := tar.NewWriter(w) + + // the writer is properly closed later, this is just in case that we have + // an error in another part of the code. + defer out.Close() // nolint: errcheck + + created := map[string]bool{} + + md5buf, instSize, err := createFilesInsideDataTar(info, out, created) + if err != nil { + return nil, 0, err + } + + if err := out.Close(); err != nil { + return nil, 0, fmt.Errorf("closing data.tar.gz: %w", err) + } + + return md5buf.Bytes(), instSize, nil +} + +func createSymlinkInsideTar(file *files.Content, out *tar.Writer) error { + return newItemInsideTar(out, []byte{}, &tar.Header{ + Name: normalizePath(file.Destination), + Linkname: file.Source, + Typeflag: tar.TypeSymlink, + ModTime: file.FileInfo.MTime, + Format: tar.FormatGNU, + }) +} + +func createFilesInsideDataTar(info *nfpm.Info, tw *tar.Writer, + created map[string]bool, +) (md5buf bytes.Buffer, instSize int64, err error) { + // create explicit directories first + for _, file := range info.Contents { + // at this point, we don't care about other types yet + if file.Type != "dir" { + continue + } + + // only consider contents for this packager + if file.Packager != "" && file.Packager != packagerName { + continue + } + + if err := createTree(tw, file.Destination, created); err != nil { + return md5buf, 0, err + } + + normalizedName := normalizePath(strings.Trim(file.Destination, "/")) + "/" + + if created[normalizedName] { + return md5buf, 0, fmt.Errorf("duplicate directory: %q", normalizedName) + } + + err = tw.WriteHeader(&tar.Header{ + Name: normalizedName, + Mode: int64(file.FileInfo.Mode), + Typeflag: tar.TypeDir, + Format: tar.FormatGNU, + Uname: file.FileInfo.Owner, + Gname: file.FileInfo.Group, + ModTime: file.FileInfo.MTime, + }) + if err != nil { + return md5buf, 0, err + } + + created[normalizedName] = true + } + + // create files and implicit directories + for _, file := range info.Contents { + // only consider contents for this packager + if file.Packager != "" && file.Packager != packagerName { + continue + } + // create implicit directory structure below the current content + if err = createTree(tw, file.Destination, created); err != nil { + return md5buf, 0, err + } + + var size int64 // declare early to avoid shadowing err + switch file.Type { + case "ghost": + // skip ghost files in deb + continue + case "dir": + // already handled above + continue + case "symlink": + err = createSymlinkInsideTar(file, tw) + default: + size, err = copyToTarAndDigest(file, tw, &md5buf) + } + if err != nil { + return md5buf, 0, err + } + instSize += size + } + + if info.Changelog != "" { + size, err := createChangelogInsideDataTar(tw, &md5buf, created, info) + if err != nil { + return md5buf, 0, err + } + + instSize += size + } + + return md5buf, instSize, nil +} + +func copyToTarAndDigest(file *files.Content, tw *tar.Writer, md5w io.Writer) (int64, error) { + tarFile, err := os.OpenFile(file.Source, os.O_RDONLY, 0o600) //nolint:gosec + if err != nil { + return 0, fmt.Errorf("could not add tarFile to the archive: %w", err) + } + // don't care if it errs while closing... + defer tarFile.Close() // nolint: errcheck,gosec + + header, err := tar.FileInfoHeader(file, file.Source) + if err != nil { + return 0, err + } + + // tar.FileInfoHeader only uses file.Mode().Perm() which masks the mode with + // 0o777 which we don't want because we want to be able to set the suid bit. 
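+ // For example, a file installed with mode 0o4755 (setuid) would come out of tar.FileInfoHeader as plain 0o755; assigning file.Mode() directly preserves the setuid/setgid/sticky bits.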
+ header.Mode = int64(file.Mode()) + header.Format = tar.FormatGNU + header.Name = normalizePath(file.Destination) + header.Uname = file.FileInfo.Owner + header.Gname = file.FileInfo.Group + if err := tw.WriteHeader(header); err != nil { + return 0, fmt.Errorf("cannot write header of %s to data.tar.gz: %w", file.Source, err) + } + digest := md5.New() // nolint:gas + if _, err := io.Copy(tw, io.TeeReader(tarFile, digest)); err != nil { + return 0, fmt.Errorf("failed to copy: %w", err) + } + if _, err := fmt.Fprintf(md5w, "%x %s\n", digest.Sum(nil), header.Name); err != nil { + return 0, fmt.Errorf("failed to write md5: %w", err) + } + return file.Size(), nil +} + +func createChangelogInsideDataTar(tarw *tar.Writer, md5w io.Writer, + created map[string]bool, info *nfpm.Info, +) (int64, error) { + var buf bytes.Buffer + out, err := gzip.NewWriterLevel(&buf, gzip.BestCompression) + if err != nil { + return 0, fmt.Errorf("could not create gzip writer: %w", err) + } + // the writers are properly closed later, this is just in case that we have + // an error in another part of the code. + defer out.Close() // nolint: errcheck + + changelogContent, err := formatChangelog(info) + if err != nil { + return 0, err + } + + if _, err = io.WriteString(out, changelogContent); err != nil { + return 0, err + } + + if err = out.Close(); err != nil { + return 0, fmt.Errorf("closing changelog.Debian.gz: %w", err) + } + + changelogData := buf.Bytes() + + // https://www.debian.org/doc/manuals/developers-reference/pkgs.de.html#recording-changes-in-the-package + // https://lintian.debian.org/tags/debian-changelog-file-missing-or-wrong-name + changelogName := normalizePath(fmt.Sprintf("/usr/share/doc/%s/changelog.Debian.gz", info.Name)) + if err = createTree(tarw, changelogName, created); err != nil { + return 0, err + } + + digest := md5.New() // nolint:gas + if _, err = digest.Write(changelogData); err != nil { + return 0, err + } + + if _, err = fmt.Fprintf(md5w, "%x %s\n", digest.Sum(nil), changelogName); err != nil { + return 0, err + } + + if err = newFileInsideTar(tarw, changelogName, changelogData); err != nil { + return 0, err + } + + return int64(len(changelogData)), nil +} + +func formatChangelog(info *nfpm.Info) (string, error) { + changelog, err := info.GetChangeLog() + if err != nil { + return "", err + } + + tpl, err := chglog.DebTemplate() + if err != nil { + return "", err + } + + formattedChangelog, err := chglog.FormatChangelog(changelog, tpl) + if err != nil { + return "", err + } + + return strings.TrimSpace(formattedChangelog) + "\n", nil +} + +// nolint:funlen +func createControl(instSize int64, md5sums []byte, info *nfpm.Info) (controlTarGz []byte, err error) { + var buf bytes.Buffer + compress := gzip.NewWriter(&buf) + out := tar.NewWriter(compress) + // the writers are properly closed later, this is just in case that we have + // an error in another part of the code. 
+ defer out.Close() // nolint: errcheck + defer compress.Close() // nolint: errcheck + + var body bytes.Buffer + if err = writeControl(&body, controlData{ + Info: info, + InstalledSize: instSize / 1024, + }); err != nil { + return nil, err + } + + filesToCreate := map[string][]byte{ + "control": body.Bytes(), + "md5sums": md5sums, + "conffiles": conffiles(info), + } + + triggers := createTriggers(info) + if len(triggers) > 0 { + filesToCreate["triggers"] = triggers + } + + for name, content := range filesToCreate { + if err := newFileInsideTar(out, name, content); err != nil { + return nil, err + } + } + + type fileAndMode struct { + fileName string + mode int64 + } + + specialFiles := map[string]*fileAndMode{} + specialFiles[info.Scripts.PreInstall] = &fileAndMode{ + fileName: "preinst", + mode: 0o755, + } + specialFiles[info.Scripts.PostInstall] = &fileAndMode{ + fileName: "postinst", + mode: 0o755, + } + specialFiles[info.Scripts.PreRemove] = &fileAndMode{ + fileName: "prerm", + mode: 0o755, + } + specialFiles[info.Scripts.PostRemove] = &fileAndMode{ + fileName: "postrm", + mode: 0o755, + } + specialFiles[info.Overridables.Deb.Scripts.Rules] = &fileAndMode{ + fileName: "rules", + mode: 0o755, + } + specialFiles[info.Overridables.Deb.Scripts.Templates] = &fileAndMode{ + fileName: "templates", + mode: 0o644, + } + specialFiles[info.Overridables.Deb.Scripts.Config] = &fileAndMode{ + fileName: "config", + mode: 0o755, + } + + for path, destMode := range specialFiles { + if path != "" { + if err := newFilePathInsideTar(out, path, destMode.fileName, destMode.mode); err != nil { + return nil, err + } + } + } + + if err := out.Close(); err != nil { + return nil, fmt.Errorf("closing control.tar.gz: %w", err) + } + if err := compress.Close(); err != nil { + return nil, fmt.Errorf("closing control.tar.gz: %w", err) + } + return buf.Bytes(), nil +} + +func newItemInsideTar(out *tar.Writer, content []byte, header *tar.Header) error { + if err := out.WriteHeader(header); err != nil { + return fmt.Errorf("cannot write header of %s file to control.tar.gz: %w", header.Name, err) + } + if _, err := out.Write(content); err != nil { + return fmt.Errorf("cannot write %s file to control.tar.gz: %w", header.Name, err) + } + return nil +} + +func newFileInsideTar(out *tar.Writer, name string, content []byte) error { + return newItemInsideTar(out, content, &tar.Header{ + Name: normalizePath(name), + Size: int64(len(content)), + Mode: 0o644, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Format: tar.FormatGNU, + }) +} + +func newFilePathInsideTar(out *tar.Writer, path, dest string, mode int64) error { + file, err := os.Open(path) //nolint:gosec + if err != nil { + return err + } + content, err := io.ReadAll(file) + if err != nil { + return err + } + return newItemInsideTar(out, content, &tar.Header{ + Name: normalizePath(dest), + Size: int64(len(content)), + Mode: mode, + ModTime: time.Now(), + Typeflag: tar.TypeReg, + Format: tar.FormatGNU, + }) +} + +// normalizePath returns a path separated by slashes, all relative path items +// resolved and relative to the current directory (so it starts with "./"). +func normalizePath(src string) string { + return "." + files.ToNixPath(filepath.Join("/", src)) +} + +// this is needed because the data.tar.gz file should have the empty folders +// as well, so we walk through the dst and create all subfolders. 
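+// For example, dst "/usr/share/doc/foo/changelog.Debian.gz" produces the directory headers "./usr/", "./usr/share/", "./usr/share/doc/" and "./usr/share/doc/foo/" (parents only, each emitted at most once via the created map); the file entry itself is written by the caller.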
+func createTree(tarw *tar.Writer, dst string, created map[string]bool) error { + for _, path := range pathsToCreate(dst) { + path = normalizePath(path) + "/" + + if created[path] { + // skipping dir that was previously created inside the archive + // (eg: usr/) + continue + } + + if err := tarw.WriteHeader(&tar.Header{ + Name: path, + Mode: 0o755, + Typeflag: tar.TypeDir, + Format: tar.FormatGNU, + ModTime: time.Now(), + Uname: "root", + Gname: "root", + }); err != nil { + return fmt.Errorf("failed to create folder: %w", err) + } + created[path] = true + } + return nil +} + +func pathsToCreate(dst string) []string { + paths := []string{} + base := strings.Trim(dst, "/") + for { + base = filepath.Dir(base) + if base == "." { + break + } + paths = append(paths, files.ToNixPath(base)) + } + // we don't really need to create those things in order apparently, but, + // it looks really weird if we don't. + result := []string{} + for i := len(paths) - 1; i >= 0; i-- { + result = append(result, paths[i]) + } + return result +} + +func conffiles(info *nfpm.Info) []byte { + // nolint: prealloc + var confs []string + for _, file := range info.Contents { + if file.Packager != "" && file.Packager != packagerName { + continue + } + switch file.Type { + case "config", "config|noreplace": + confs = append(confs, file.Destination) + } + } + return []byte(strings.Join(confs, "\n") + "\n") +} + +func createTriggers(info *nfpm.Info) []byte { + var buffer bytes.Buffer + + // https://man7.org/linux/man-pages/man5/deb-triggers.5.html + triggerEntries := []struct { + Directive string + TriggerNames *[]string + }{ + {"interest", &info.Deb.Triggers.Interest}, + {"interest-await", &info.Deb.Triggers.InterestAwait}, + {"interest-noawait", &info.Deb.Triggers.InterestNoAwait}, + {"activate", &info.Deb.Triggers.Activate}, + {"activate-await", &info.Deb.Triggers.ActivateAwait}, + {"activate-noawait", &info.Deb.Triggers.ActivateNoAwait}, + } + + for _, triggerEntry := range triggerEntries { + for _, triggerName := range *triggerEntry.TriggerNames { + fmt.Fprintf(&buffer, "%s %s\n", triggerEntry.Directive, triggerName) + } + } + + return buffer.Bytes() +} + +const controlTemplate = ` +{{- /* Mandatory fields */ -}} +Package: {{.Info.Name}} +Version: {{ if .Info.Epoch}}{{ .Info.Epoch }}:{{ end }}{{.Info.Version}} + {{- if .Info.Prerelease}}~{{ .Info.Prerelease }}{{- end }} + {{- if .Info.VersionMetadata}}+{{ .Info.VersionMetadata }}{{- end }} + {{- if .Info.Release}}-{{ .Info.Release }}{{- end }} +Section: {{.Info.Section}} +Priority: {{.Info.Priority}} +Architecture: {{ if ne .Info.Platform "linux"}}{{ .Info.Platform }}-{{ end }}{{.Info.Arch}} +{{- /* Optional fields */ -}} +{{- if .Info.Maintainer}} +Maintainer: {{.Info.Maintainer}} +{{- end }} +Installed-Size: {{.InstalledSize}} +{{- with .Info.Replaces}} +Replaces: {{join .}} +{{- end }} +{{- with nonEmpty .Info.Provides}} +Provides: {{join .}} +{{- end }} +{{- with .Info.Depends}} +Depends: {{join .}} +{{- end }} +{{- with .Info.Recommends}} +Recommends: {{join .}} +{{- end }} +{{- with .Info.Suggests}} +Suggests: {{join .}} +{{- end }} +{{- with .Info.Conflicts}} +Conflicts: {{join .}} +{{- end }} +{{- with .Info.Deb.Breaks}} +Breaks: {{join .}} +{{- end }} +{{- if .Info.Homepage}} +Homepage: {{.Info.Homepage}} +{{- end }} +{{- /* Mandatory fields */}} +Description: {{multiline .Info.Description}} +{{- range $key, $value := .Info.Deb.Fields }} +{{- if $value }} +{{$key}}: {{$value}} +{{- end }} +{{- end }} +` + +type controlData struct { + Info *nfpm.Info + 
InstalledSize int64 +} + +func writeControl(w io.Writer, data controlData) error { + tmpl := template.New("control") + tmpl.Funcs(template.FuncMap{ + "join": func(strs []string) string { + return strings.Trim(strings.Join(strs, ", "), " ") + }, + "multiline": func(strs string) string { + ret := strings.ReplaceAll(strs, "\n", "\n ") + return strings.Trim(ret, " \n") + }, + "nonEmpty": func(strs []string) []string { + var result []string + for _, s := range strs { + s := strings.TrimSpace(s) + if s == "" { + continue + } + result = append(result, s) + } + return result + }, + }) + return template.Must(tmpl.Parse(controlTemplate)).Execute(w, data) +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/deprecation/deprecation.go b/vendor/github.com/goreleaser/nfpm/v2/deprecation/deprecation.go new file mode 100644 index 0000000000..aab9adaae1 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/deprecation/deprecation.go @@ -0,0 +1,31 @@ +// Package deprecation provides centralized deprecation notice messaging for nfpm. +package deprecation + +import ( + "fmt" + "io" + "os" +) + +type prefixed struct{ io.Writer } + +func (p prefixed) Write(b []byte) (int, error) { + return p.Writer.Write(append([]byte("DEPRECATION WARNING: "), b...)) +} + +var Noticer io.Writer = prefixed{os.Stderr} + +// Print prints the given string to the Noticer. +func Print(s string) { + fmt.Fprint(Noticer, s) +} + +// Println prints the given string to the Noticer, followed by a newline. +func Println(s string) { + fmt.Fprintln(Noticer, s) +} + +// Printf prints the formatted string to the Noticer. +func Printf(format string, a ...interface{}) { + fmt.Fprintf(Noticer, format, a...) +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/files/files.go b/vendor/github.com/goreleaser/nfpm/v2/files/files.go new file mode 100644 index 0000000000..7b220fea9b --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/files/files.go @@ -0,0 +1,237 @@ +package files + +import ( + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "time" + + "github.com/goreleaser/nfpm/v2/internal/glob" +) + +// Content describes the source and destination +// of one file to copy into a package. +type Content struct { + Source string `yaml:"src,omitempty" json:"src,omitempty"` + Destination string `yaml:"dst" json:"dst"` + Type string `yaml:"type,omitempty" json:"type,omitempty" jsonschema:"enum=symlink,enum=ghost,enum=config,enum=config|noreplace,enum=dir,enum=,default="` + Packager string `yaml:"packager,omitempty" json:"packager,omitempty"` + FileInfo *ContentFileInfo `yaml:"file_info,omitempty" json:"file_info,omitempty"` +} + +type ContentFileInfo struct { + Owner string `yaml:"owner,omitempty" json:"owner,omitempty"` + Group string `yaml:"group,omitempty" json:"group,omitempty"` + Mode os.FileMode `yaml:"mode,omitempty" json:"mode,omitempty"` + MTime time.Time `yaml:"mtime,omitempty" json:"mtime,omitempty"` + Size int64 `yaml:"-" json:"-"` +} + +// Contents is a list of Content to process.
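+// Contents implements sort.Interface: entries are ordered by Destination, then Type, then Packager, which keeps expanded file lists reproducible.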
+type Contents []*Content + +func (c Contents) Len() int { + return len(c) +} + +func (c Contents) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +func (c Contents) Less(i, j int) bool { + a, b := c[i], c[j] + + if a.Destination != b.Destination { + return a.Destination < b.Destination + } + + if a.Type != b.Type { + return a.Type < b.Type + } + + return a.Packager < b.Packager +} + +func (c Contents) ContainsDestination(dst string) bool { + for _, content := range c { + if strings.TrimRight(content.Destination, "/") == strings.TrimRight(dst, "/") { + return true + } + } + + return false +} + +func (c *Content) WithFileInfoDefaults() *Content { + cc := &Content{ + Source: c.Source, + Destination: c.Destination, + Type: c.Type, + Packager: c.Packager, + FileInfo: c.FileInfo, + } + if cc.FileInfo == nil { + cc.FileInfo = &ContentFileInfo{} + } + if cc.FileInfo.Owner == "" { + cc.FileInfo.Owner = "root" + } + if cc.FileInfo.Group == "" { + cc.FileInfo.Group = "root" + } + if cc.Type == "dir" && cc.FileInfo.Mode == 0 { + cc.FileInfo.Mode = 0o755 + } + + // determine if we still need info + fileInfoAlreadyComplete := (!cc.FileInfo.MTime.IsZero() && + cc.FileInfo.Mode != 0 && + (cc.FileInfo.Size != 0 || cc.Type == "dir")) + + // only stat source when we actually need more information + if cc.Source != "" && !fileInfoAlreadyComplete { + info, err := os.Stat(cc.Source) + if err == nil { + if cc.FileInfo.MTime.IsZero() { + cc.FileInfo.MTime = info.ModTime() + } + if cc.FileInfo.Mode == 0 { + cc.FileInfo.Mode = info.Mode() + } + cc.FileInfo.Size = info.Size() + } + } + + if cc.FileInfo.MTime.IsZero() { + cc.FileInfo.MTime = time.Now().UTC() + } + return cc +} + +// Name is part of the os.FileInfo interface +func (c *Content) Name() string { + return c.Source +} + +// Size is part of the os.FileInfo interface +func (c *Content) Size() int64 { + return c.FileInfo.Size +} + +// Mode is part of the os.FileInfo interface +func (c *Content) Mode() os.FileMode { + return c.FileInfo.Mode +} + +// ModTime is part of the os.FileInfo interface +func (c *Content) ModTime() time.Time { + return c.FileInfo.MTime +} + +// IsDir is part of the os.FileInfo interface +func (c *Content) IsDir() bool { + return false +} + +// Sys is part of the os.FileInfo interface +func (c *Content) Sys() interface{} { + return nil +} + +// ExpandContentGlobs gathers all of the real files to be copied into the package.
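+// For example (paths illustrative), a Content with Source "./bin/*" and Destination "/usr/bin" would typically expand to one entry per matched file, such as {Source: "bin/foo", Destination: "/usr/bin/foo"}, with missing file_info fields filled in from a stat of each source file.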
+func ExpandContentGlobs(contents Contents, disableGlobbing bool) (files Contents, err error) { + for _, f := range contents { + var globbed map[string]string + + switch f.Type { + case "ghost", "symlink", "dir": + // Ghost, symlinks and dirs need to be in the list, but don't glob + // them because they do not really exist + files = append(files, f.WithFileInfoDefaults()) + case "config", "config|noreplace", "file", "": + globbed, err = glob.Glob(f.Source, f.Destination, disableGlobbing) + if err != nil { + return nil, err + } + + files, err = appendGlobbedFiles(files, globbed, f) + if err != nil { + return nil, err + } + default: + return files, fmt.Errorf("invalid file type: %s", f.Type) + } + + } + + err = checkNoCollisions(files) + if err != nil { + return nil, err + } + + // sort the files for reproducibility and general cleanliness + sort.Sort(files) + + return files, nil +} + +func appendGlobbedFiles(all Contents, globbed map[string]string, origFile *Content) (Contents, error) { + for src, dst := range globbed { + // if the file has a FileInfo, we need to copy it but recalculate its size + newFileInfo := origFile.FileInfo + if newFileInfo != nil { + newFileInfoVal := *newFileInfo + newFileInfoVal.Size = 0 + newFileInfo = &newFileInfoVal + } + newFile := (&Content{ + Destination: ToNixPath(dst), + Source: ToNixPath(src), + Type: origFile.Type, + FileInfo: newFileInfo, + Packager: origFile.Packager, + }).WithFileInfoDefaults() + if dst, err := os.Readlink(src); err == nil { + newFile.Source = dst + newFile.Type = "symlink" + } + + all = append(all, newFile) + } + + return all, nil +} + +var ErrContentCollision = fmt.Errorf("content collision") + +func checkNoCollisions(contents Contents) error { + alreadyPresent := map[string]*Content{} + + for _, elem := range contents { + present, ok := alreadyPresent[elem.Destination] + if ok && (present.Packager == "" || elem.Packager == "" || present.Packager == elem.Packager) { + if elem.Type == "dir" { + return fmt.Errorf("cannot add directory %q because it is already present: %w", + elem.Destination, ErrContentCollision) + } + + return fmt.Errorf( + "cannot add %q because %q is already present at the same destination (%s): %w", + elem.Source, present.Source, present.Destination, ErrContentCollision) + } + + alreadyPresent[elem.Destination] = elem + } + + return nil +} + +// ToNixPath converts the given path to a nix-style path. +// +// Windows-style path separators are considered escape +// characters by some libraries, which can cause issues. +func ToNixPath(path string) string { + return filepath.ToSlash(filepath.Clean(path)) +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/doc.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/doc.go new file mode 100644 index 0000000000..45e4f2c921 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/doc.go @@ -0,0 +1,2 @@ +// Package cmd contains the main nfpm cli source code.
+package cmd diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/docs.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/docs.go new file mode 100644 index 0000000000..64cd516dac --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/docs.go @@ -0,0 +1,35 @@ +package cmd + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/cobra/doc" +) + +type docsCmd struct { + cmd *cobra.Command +} + +func newDocsCmd() *docsCmd { + root := &docsCmd{} + cmd := &cobra.Command{ + Use: "docs", + Short: "Generates nFPM's command line docs", + SilenceUsage: true, + DisableFlagsInUseLine: true, + Hidden: true, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + root.cmd.Root().DisableAutoGenTag = true + return doc.GenMarkdownTreeCustom(root.cmd.Root(), "www/docs/cmd", func(_ string) string { + return "" + }, func(s string) string { + return "/cmd/" + strings.TrimSuffix(s, ".md") + "/" + }) + }, + } + + root.cmd = cmd + return root +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/init.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/init.go new file mode 100644 index 0000000000..a5a0c4e1b7 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/init.go @@ -0,0 +1,90 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" +) + +type initCmd struct { + cmd *cobra.Command + config string +} + +func newInitCmd() *initCmd { + root := &initCmd{} + cmd := &cobra.Command{ + Use: "init", + Aliases: []string{"i"}, + Short: "Creates a sample nfpm.yaml configuration file", + SilenceUsage: true, + SilenceErrors: true, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + if err := os.WriteFile(root.config, []byte(example), 0o666); err != nil { + return fmt.Errorf("failed to create example file: %w", err) + } + return nil + }, + } + + cmd.Flags().StringVarP(&root.config, "config", "f", "nfpm.yaml", "path to the to-be-created config file") + + root.cmd = cmd + return root +} + +const example = `# nfpm example configuration file +# +# check https://nfpm.goreleaser.com/configuration for detailed usage +# +name: "foo" +arch: "amd64" +platform: "linux" +version: "v1.0.0" +section: "default" +priority: "extra" +replaces: +- foobar +provides: +- bar +depends: +- foo +- bar +recommends: +- whatever +suggests: +- something-else +conflicts: +- not-foo +- not-bar +maintainer: "John Doe <john@example.com>" +description: | + FooBar is the great foo and bar software. + And this can be in multiple lines!
+vendor: "FooBarCorp" +homepage: "http://example.com" +license: "MIT" +changelog: "changelog.yaml" +contents: +- src: ./foo + dst: /usr/bin/foo +- src: ./bar + dst: /usr/bin/bar +- src: ./foobar.conf + dst: /etc/foobar.conf + type: config +- src: /usr/bin/foo + dst: /sbin/foo + type: symlink +overrides: + rpm: + scripts: + preinstall: ./scripts/preinstall.sh + postremove: ./scripts/postremove.sh + deb: + scripts: + postinstall: ./scripts/postinstall.sh + preremove: ./scripts/preremove.sh +` diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/man.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/man.go new file mode 100644 index 0000000000..09768f16d3 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/man.go @@ -0,0 +1,38 @@ +package cmd + +import ( + "fmt" + "os" + + mcobra "github.com/muesli/mango-cobra" + "github.com/muesli/roff" + "github.com/spf13/cobra" +) + +type manCmd struct { + cmd *cobra.Command +} + +func newManCmd() *manCmd { + root := &manCmd{} + cmd := &cobra.Command{ + Use: "man", + Short: "Generates nfpm's command line manpages", + SilenceUsage: true, + DisableFlagsInUseLine: true, + Hidden: true, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + manPage, err := mcobra.NewManPage(1, root.cmd.Root()) + if err != nil { + return err + } + + _, err = fmt.Fprint(os.Stdout, manPage.Build(roff.NewDocument())) + return err + }, + } + + root.cmd = cmd + return root +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/package.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/package.go new file mode 100644 index 0000000000..334dfe9ac5 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/package.go @@ -0,0 +1,106 @@ +package cmd + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + + "github.com/goreleaser/nfpm/v2" + "github.com/spf13/cobra" +) + +type packageCmd struct { + cmd *cobra.Command + config string + target string + packager string +} + +func newPackageCmd() *packageCmd { + root := &packageCmd{} + cmd := &cobra.Command{ + Use: "package", + Aliases: []string{"pkg", "p"}, + Short: "Creates a package based on the given config file and flags", + SilenceUsage: true, + SilenceErrors: true, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + return doPackage(root.config, root.target, root.packager) + }, + } + + cmd.Flags().StringVarP(&root.config, "config", "f", "nfpm.yaml", "config file to be used") + cmd.Flags().StringVarP(&root.target, "target", "t", "", "where to save the generated package (filename, folder or empty for current folder)") + cmd.Flags().StringVarP(&root.packager, "packager", "p", "", "which packager implementation to use [apk|deb|rpm|archlinux]") + + root.cmd = cmd + return root +} + +var errInsufficientParams = errors.New("a packager must be specified if target is a directory or blank") + +// nolint:funlen +func doPackage(configPath, target, packager string) error { + targetIsADirectory := false + stat, err := os.Stat(target) + if err == nil && stat.IsDir() { + targetIsADirectory = true + } + + if packager == "" { + ext := filepath.Ext(target) + if targetIsADirectory || ext == "" { + return errInsufficientParams + } + + packager = ext[1:] + fmt.Println("guessing packager from target file extension...") + } + + config, err := nfpm.ParseFile(configPath) + if err != nil { + return err + } + + info, err := config.Get(packager) + if err != nil { + return err + } + + info = nfpm.WithDefaults(info) + + fmt.Printf("using %s 
packager...\n", packager) + pkg, err := nfpm.Get(packager) + if err != nil { + return err + } + + if target == "" { + // if no target was specified create a package in + // current directory with a conventional file name + target = pkg.ConventionalFileName(info) + } else if targetIsADirectory { + // if a directory was specified as target, create + // a package with conventional file name there + target = path.Join(target, pkg.ConventionalFileName(info)) + } + + f, err := os.Create(target) + if err != nil { + return err + } + defer f.Close() + + info.Target = target + + if err := pkg.Package(info, f); err != nil { + os.Remove(target) + return err + } + + fmt.Printf("created package: %s\n", target) + return f.Close() +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/root.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/root.go new file mode 100644 index 0000000000..b917e02c26 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/root.go @@ -0,0 +1,55 @@ +package cmd + +import ( + "fmt" + + _ "github.com/goreleaser/nfpm/v2/apk" + _ "github.com/goreleaser/nfpm/v2/arch" + _ "github.com/goreleaser/nfpm/v2/deb" + _ "github.com/goreleaser/nfpm/v2/rpm" + "github.com/spf13/cobra" +) + +func Execute(version string, exit func(int), args []string) { + newRootCmd(version, exit).Execute(args) +} + +type rootCmd struct { + cmd *cobra.Command + exit func(int) +} + +func (cmd *rootCmd) Execute(args []string) { + cmd.cmd.SetArgs(args) + + if err := cmd.cmd.Execute(); err != nil { + fmt.Println(err.Error()) + cmd.exit(1) + } +} + +func newRootCmd(version string, exit func(int)) *rootCmd { + root := &rootCmd{ + exit: exit, + } + cmd := &cobra.Command{ + Use: "nfpm", + Short: "Packages apps on RPM, Deb and APK formats based on a YAML configuration file", + Long: `nFPM is a simple, 0-dependencies, deb, rpm and apk packager.`, + Version: version, + SilenceUsage: true, + SilenceErrors: true, + Args: cobra.NoArgs, + } + + cmd.AddCommand( + newInitCmd().cmd, + newPackageCmd().cmd, + newDocsCmd().cmd, + newManCmd().cmd, + newSchemaCmd().cmd, + ) + + root.cmd = cmd + return root +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/schema.go b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/schema.go new file mode 100644 index 0000000000..fa77a4a1dc --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/cmd/schema.go @@ -0,0 +1,53 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/goreleaser/nfpm/v2" + "github.com/invopop/jsonschema" + "github.com/spf13/cobra" +) + +type schemaCmd struct { + cmd *cobra.Command + output string +} + +func newSchemaCmd() *schemaCmd { + root := &schemaCmd{} + cmd := &cobra.Command{ + Use: "jsonschema", + Aliases: []string{"schema"}, + Short: "Outputs nFPM's JSON schema", + SilenceUsage: true, + SilenceErrors: true, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + schema := jsonschema.Reflect(&nfpm.Config{}) + schema.Description = "nFPM configuration definition file" + bts, err := json.MarshalIndent(schema, " ", " ") + if err != nil { + return fmt.Errorf("failed to create jsonschema: %w", err) + } + if root.output == "-" { + fmt.Println(string(bts)) + return nil + } + if err := os.MkdirAll(filepath.Dir(root.output), 0o755); err != nil { + return fmt.Errorf("failed to write jsonschema file: %w", err) + } + if err := os.WriteFile(root.output, bts, 0o666); err != nil { + return fmt.Errorf("failed to write jsonschema file: %w", err) + } + return nil + }, + } 
+ + cmd.Flags().StringVarP(&root.output, "output", "o", "-", "where to save the json schema") + + root.cmd = cmd + return root +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/glob/glob.go b/vendor/github.com/goreleaser/nfpm/v2/internal/glob/glob.go new file mode 100644 index 0000000000..e6fca62814 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/glob/glob.go @@ -0,0 +1,111 @@ +// Package glob provides file globbing for use in nfpm.Packager implementations +package glob + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/goreleaser/fileglob" +) + +// longestCommonPrefix returns the longest common prefix of all strings in the +// argument slice. If the slice is empty the empty string is returned. +func longestCommonPrefix(strs []string) string { + if len(strs) == 0 { + return "" + } + lcp := strs[0] + for _, str := range strs { + lcp = strlcp(lcp, str) + } + return lcp +} + +func strlcp(a, b string) string { + var min int + if len(a) > len(b) { + min = len(b) + } else { + min = len(a) + } + for i := 0; i < min; i++ { + if a[i] != b[i] { + return a[0:i] + } + } + return a[0:min] +} + +// ErrGlobNoMatch happens when no files matched the given glob. +type ErrGlobNoMatch struct { + glob string +} + +func (e ErrGlobNoMatch) Error() string { + return fmt.Sprintf("glob failed: %s: no matching files", e.glob) +} + +// Glob returns a map with source file paths as keys and destinations as values. +// First the longest common prefix (lcp) of all globbed files is found. The destination +// for each globbed file is then dst joined with src with the lcp trimmed off. +func Glob(pattern, dst string, ignoreMatchers bool) (map[string]string, error) { + options := []fileglob.OptFunc{fileglob.MatchDirectoryIncludesContents} + if ignoreMatchers { + options = append(options, fileglob.QuoteMeta) + } + + if strings.HasPrefix(pattern, "../") { + p, err := filepath.Abs(pattern) + if err != nil { + return nil, fmt.Errorf("failed to resolve pattern: %s: %w", pattern, err) + } + pattern = filepath.ToSlash(p) + } + + matches, err := fileglob.Glob(pattern, append(options, fileglob.MaybeRootFS)...)
+ if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, err + } + + return nil, fmt.Errorf("glob failed: %s: %w", pattern, err) + } + + if len(matches) == 0 { + return nil, ErrGlobNoMatch{pattern} + } + + files := make(map[string]string) + prefix := pattern + // the prefix may not be a complete path or may use glob patterns, in that case use the parent directory + if _, err := os.Stat(prefix); errors.Is(err, fs.ErrNotExist) || (fileglob.ContainsMatchers(pattern) && !ignoreMatchers) { + prefix = filepath.Dir(longestCommonPrefix(matches)) + } + + for _, src := range matches { + // only include files + if f, err := os.Stat(src); err == nil && f.Mode().IsDir() { + continue + } + + if strings.HasSuffix(dst, "/") { + files[src] = filepath.Join(dst, filepath.Base(src)) + continue + } + + relpath, err := filepath.Rel(prefix, src) + if err != nil { + // since prefix is a prefix of src a relative path should always be found + return nil, err + } + + globdst := filepath.ToSlash(filepath.Join(dst, relpath)) + files[src] = globdst + } + + return files, nil +} diff --git a/vendor/github.com/aokoli/goutils/LICENSE.txt b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/LICENSE similarity index 100% rename from vendor/github.com/aokoli/goutils/LICENSE.txt rename to vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/LICENSE diff --git a/vendor/github.com/bufbuild/buf/private/pkg/observability/wrapped_round_tripper.go b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/dir.go similarity index 54% rename from vendor/github.com/bufbuild/buf/private/pkg/observability/wrapped_round_tripper.go rename to vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/dir.go index 5b04b6f84e..031cad14b8 100644 --- a/vendor/github.com/bufbuild/buf/private/pkg/observability/wrapped_round_tripper.go +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/dir.go @@ -1,4 +1,4 @@ -// Copyright 2020-2023 Buf Technologies, Inc. +// Copyright 2019 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,24 +12,29 @@ // See the License for the specific language governing permissions and // limitations under the License. -package observability +package rpmpack -import ( - "net/http" - - "go.opencensus.io/tag" -) +// dirIndex holds the index from files to directory names. +type dirIndex struct { + m map[string]uint32 + l []string +} -type wrappedRoundTripper struct { - Base http.RoundTripper - Tags []tag.Mutator +func newDirIndex() *dirIndex { + return &dirIndex{m: make(map[string]uint32)} } -func (w *wrappedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - ctx, err := tag.New(req.Context(), w.Tags...) - if err != nil { - return nil, err +func (d *dirIndex) Get(value string) uint32 { + if idx, ok := d.m[value]; ok { + return idx } - wrappedRequest := req.WithContext(ctx) - return w.Base.RoundTrip(wrappedRequest) + newIdx := uint32(len(d.l)) + d.l = append(d.l, value) + + d.m[value] = newIdx + return newIdx +} + +func (d *dirIndex) AllDirs() []string { + return d.l } diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/file_types.go b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/file_types.go new file mode 100644 index 0000000000..720f7335f6 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/file_types.go @@ -0,0 +1,47 @@ +package rpmpack + +// FileType is the type of a file inside a RPM package. 
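+// FileType values are bit flags and may be combined with bitwise OR; a "config|noreplace" content entry, for instance, would typically be encoded as ConfigFile|NoReplaceFile.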
+type FileType int32 + +// https://refspecs.linuxbase.org/LSB_3.1.1/LSB-Core-generic/LSB-Core-generic/pkgformat.html#AEN27560 +// The RPMFile.Type tag value shall identify various characteristics of the file in the payload that it describes. +// It shall be an INT32 value consisting of either the value GenericFile (0) or +// the bitwise inclusive or of one or more of the following values. Some of these combinations may make no sense +const ( + // GenericFile is just a basic file in an RPM + GenericFile FileType = 1 << iota >> 1 + // ConfigFile is a configuration file, and an existing file should be saved during a + // package upgrade operation and not removed during a package removal operation. + ConfigFile + // DocFile is a file that contains documentation. + DocFile + // DoNotUseFile is reserved for future use; conforming packages may not use this flag. + DoNotUseFile + // MissingOkFile need not exist on the installed system. + MissingOkFile + // NoReplaceFile similar to the ConfigFile, this flag indicates that during an upgrade operation + // the original file on the system should not be altered. + NoReplaceFile + // SpecFile is the package specification file. + SpecFile + // GhostFile is not actually included in the payload, but should still be considered as a part of the package. + // For example, a log file generated by the application at run time. + GhostFile + // LicenceFile contains the license conditions. + LicenceFile + // ReadmeFile contains high level notes about the package. + ReadmeFile + // ExcludeFile is not a part of the package, and should not be installed. + ExcludeFile +) + +// RPMFile contains a particular file's entry and data. +type RPMFile struct { + Name string + Body []byte + Mode uint + Owner string + Group string + MTime uint32 + Type FileType +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/header.go b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/header.go new file mode 100644 index 0000000000..b223672ef8 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/header.go @@ -0,0 +1,211 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpmpack + +import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + + "github.com/pkg/errors" +) + +const ( + signatures = 0x3e + immutable = 0x3f + + typeInt16 = 0x03 + typeInt32 = 0x04 + typeString = 0x06 + typeBinary = 0x07 + typeStringArray = 0x08 +) + +// Only integer types are aligned. This is not just an optimization - some versions +// of rpm fail when integers are not aligned. Other versions fail when non-integers are aligned. 
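+// For example, an int32 entry whose data would otherwise start at offset 6 is padded with two zero bytes so that it starts at offset 8, a multiple of its 4-byte boundary (see pad below).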
+var boundaries = map[int]int{ + typeInt16: 2, + typeInt32: 4, +} + +type IndexEntry struct { + rpmtype, count int + data []byte +} + +func (e IndexEntry) indexBytes(tag, contentOffset int) []byte { + b := &bytes.Buffer{} + if err := binary.Write(b, binary.BigEndian, []int32{int32(tag), int32(e.rpmtype), int32(contentOffset), int32(e.count)}); err != nil { + // binary.Write can fail if the underlying Write fails, or the types are invalid. + // bytes.Buffer's write never errors out; it can only panic with OOM. + panic(err) + } + return b.Bytes() +} + +func intEntry(rpmtype, size int, value interface{}) IndexEntry { + b := &bytes.Buffer{} + if err := binary.Write(b, binary.BigEndian, value); err != nil { + // binary.Write can fail if the underlying Write fails, or the types are invalid. + // bytes.Buffer's write never errors out; it can only panic with OOM. + panic(err) + } + return IndexEntry{rpmtype, size, b.Bytes()} +} + +func EntryInt16(value []int16) IndexEntry { + return intEntry(typeInt16, len(value), value) +} + +func EntryUint16(value []uint16) IndexEntry { + return intEntry(typeInt16, len(value), value) +} + +func EntryInt32(value []int32) IndexEntry { + return intEntry(typeInt32, len(value), value) +} + +func EntryUint32(value []uint32) IndexEntry { + return intEntry(typeInt32, len(value), value) +} + +func EntryString(value string) IndexEntry { + return IndexEntry{typeString, 1, append([]byte(value), byte(0o0))} +} + +func EntryBytes(value []byte) IndexEntry { + return IndexEntry{typeBinary, len(value), value} +} + +func EntryStringSlice(value []string) IndexEntry { + b := [][]byte{} + for _, v := range value { + b = append(b, []byte(v)) + } + bb := append(bytes.Join(b, []byte{0o0}), byte(0o0)) + return IndexEntry{typeStringArray, len(value), bb} +} + +type index struct { + entries map[int]IndexEntry + h int +} + +func newIndex(h int) *index { + return &index{entries: make(map[int]IndexEntry), h: h} +} + +func (i *index) Add(tag int, e IndexEntry) { + i.entries[tag] = e +} + +func (i *index) AddEntries(m map[int]IndexEntry) { + for t, e := range m { + i.Add(t, e) + } +} + +func (i *index) sortedTags() []int { + t := []int{} + for k := range i.entries { + t = append(t, k) + } + sort.Ints(t) + return t +} + +func pad(w *bytes.Buffer, rpmtype, offset int) { + // We need to align integer entries... + if b, ok := boundaries[rpmtype]; ok && offset%b != 0 { + if _, err := w.Write(make([]byte, b-offset%b)); err != nil { + // bytes.Buffer's Write never errors out; it can only panic with OOM. + panic(err) + } + } +} + +// Bytes returns the bytes of the index. +func (i *index) Bytes() ([]byte, error) { + w := &bytes.Buffer{} + // Even the header has three parts: The lead, the index entries, and the entries. + // Because of alignment, we can only tell the actual size and offset after writing + // the entries. + entryData := &bytes.Buffer{} + tags := i.sortedTags() + offsets := make([]int, len(tags)) + for ii, tag := range tags { + e := i.entries[tag] + pad(entryData, e.rpmtype, entryData.Len()) + offsets[ii] = entryData.Len() + entryData.Write(e.data) + } + entryData.Write(i.eigenHeader().data) + + // 4 magic and 4 reserved + w.Write([]byte{0x8e, 0xad, 0xe8, 0x01, 0, 0, 0, 0}) + // 4 count and 4 size + // We add the pseudo-entry "eigenHeader" to count.
+	if err := binary.Write(w, binary.BigEndian, []int32{int32(len(i.entries)) + 1, int32(entryData.Len())}); err != nil {
+		return nil, errors.Wrap(err, "failed to write eigenHeader")
+	}
+	// Write the eigenHeader index entry
+	w.Write(i.eigenHeader().indexBytes(i.h, entryData.Len()-0x10))
+	// Write all of the other index entries
+	for ii, tag := range tags {
+		e := i.entries[tag]
+		w.Write(e.indexBytes(tag, offsets[ii]))
+	}
+	w.Write(entryData.Bytes())
+	return w.Bytes(), nil
+}
+
+// the eigenHeader is a weird entry. Its index entry is sorted first, but its content
+// is last. The content is a 16 byte index entry, which is almost the same as the index
+// entry except for the offset. The offset here is ... minus the length of the index
+// entry region, which is always 0x10 * the number of entries.
+// I kid you not.
+func (i *index) eigenHeader() IndexEntry {
+	b := &bytes.Buffer{}
+	if err := binary.Write(b, binary.BigEndian, []int32{int32(i.h), int32(typeBinary), -int32(0x10 * (len(i.entries) + 1)), int32(0x10)}); err != nil {
+		// binary.Write can fail if the underlying Write fails, or the types are invalid.
+		// bytes.Buffer's Write never errors out, it can only panic with OOM.
+		panic(err)
+	}
+
+	return EntryBytes(b.Bytes())
+}
+
+func lead(name, fullVersion string) []byte {
+	// RPM format = 0xedabeedb
+	// version 3.0 = 0x0300
+	// type binary = 0x0000
+	// machine archnum (i386?) = 0x0001
+	// name ( 66 bytes, with null termination)
+	// osnum (linux?) = 0x0001
+	// sig type (header-style) = 0x0005
+	// reserved 16 bytes of 0x00
+	n := []byte(fmt.Sprintf("%s-%s", name, fullVersion))
+	if len(n) > 65 {
+		n = n[:65]
+	}
+	n = append(n, make([]byte, 66-len(n))...)
+	b := []byte{0xed, 0xab, 0xee, 0xdb, 0x03, 0x00, 0x00, 0x00, 0x00, 0x01}
+	b = append(b, n...)
+	b = append(b, []byte{0x00, 0x01, 0x00, 0x05}...)
+	b = append(b, make([]byte, 16)...)
+	return b
+}
diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/rpm.go b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/rpm.go
new file mode 100644
index 0000000000..2c243e410d
--- /dev/null
+++ b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/rpm.go
@@ -0,0 +1,550 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpmpack packs files to rpm files.
+// It is designed to be simple to use and deploy, not requiring any filesystem access
+// to create rpm files.
+package rpmpack
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/cavaliergopher/cpio"
+	"github.com/klauspost/compress/zstd"
+	gzip "github.com/klauspost/pgzip"
+	"github.com/pkg/errors"
+	"github.com/ulikunitz/xz"
+	"github.com/ulikunitz/xz/lzma"
+)
+
+var (
+	// ErrWriteAfterClose is returned when a user calls Write() on a closed rpm.
+	ErrWriteAfterClose = errors.New("rpm write after close")
+	// ErrWrongFileOrder is returned when files are not sorted by name.
+ ErrWrongFileOrder = errors.New("wrong file addition order") +) + +// RPMMetaData contains meta info about the whole package. +type RPMMetaData struct { + Name, + Summary, + Description, + Version, + Release, + Arch, + OS, + Vendor, + URL, + Packager, + Group, + Licence, + BuildHost, + Compressor string + Epoch uint32 + BuildTime time.Time + Provides, + Obsoletes, + Suggests, + Recommends, + Requires, + Conflicts Relations +} + +// RPM holds the state of a particular rpm file. Please use NewRPM to instantiate it. +type RPM struct { + RPMMetaData + di *dirIndex + payload *bytes.Buffer + payloadSize uint + cpio *cpio.Writer + basenames []string + dirindexes []uint32 + filesizes []uint32 + filemodes []uint16 + fileowners []string + filegroups []string + filemtimes []uint32 + filedigests []string + filelinktos []string + fileflags []uint32 + closed bool + compressedPayload io.WriteCloser + files map[string]RPMFile + prein string + postin string + preun string + postun string + pretrans string + posttrans string + customTags map[int]IndexEntry + customSigs map[int]IndexEntry + pgpSigner func([]byte) ([]byte, error) +} + +// NewRPM creates and returns a new RPM struct. +func NewRPM(m RPMMetaData) (*RPM, error) { + var err error + + if m.OS == "" { + m.OS = "linux" + } + + if m.Arch == "" { + m.Arch = "noarch" + } + + p := &bytes.Buffer{} + + z, compressorName, err := setupCompressor(m.Compressor, p) + if err != nil { + return nil, err + } + + // only use compressor name for the rpm tag, not the level + m.Compressor = compressorName + + rpm := &RPM{ + RPMMetaData: m, + di: newDirIndex(), + payload: p, + compressedPayload: z, + cpio: cpio.NewWriter(z), + files: make(map[string]RPMFile), + customTags: make(map[int]IndexEntry), + customSigs: make(map[int]IndexEntry), + } + + // A package must provide itself... 
+ rpm.Provides.addIfMissing(&Relation{ + Name: rpm.Name, + Version: rpm.FullVersion(), + Sense: SenseEqual, + }) + + return rpm, nil +} + +func setupCompressor(compressorSetting string, w io.Writer) (wc io.WriteCloser, + compressorType string, err error, +) { + parts := strings.Split(compressorSetting, ":") + if len(parts) > 2 { + return nil, "", fmt.Errorf("malformed compressor setting: %s", compressorSetting) + } + + compressorType = parts[0] + compressorLevel := "" + if len(parts) == 2 { + compressorLevel = parts[1] + } + + switch compressorType { + case "": + compressorType = "gzip" + fallthrough + case "gzip": + level := 9 + + if compressorLevel != "" { + var err error + + level, err = strconv.Atoi(compressorLevel) + if err != nil { + return nil, "", fmt.Errorf("parse gzip compressor level: %w", err) + } + } + + wc, err = gzip.NewWriterLevel(w, level) + case "lzma": + if compressorLevel != "" { + return nil, "", fmt.Errorf("no compressor level supported for lzma: %s", compressorLevel) + } + + wc, err = lzma.NewWriter(w) + case "xz": + if compressorLevel != "" { + return nil, "", fmt.Errorf("no compressor level supported for xz: %s", compressorLevel) + } + + wc, err = xz.NewWriter(w) + case "zstd": + level := zstd.SpeedBetterCompression + + if compressorLevel != "" { + var ok bool + + if intLevel, err := strconv.Atoi(compressorLevel); err == nil { + level = zstd.EncoderLevelFromZstd(intLevel) + } else { + ok, level = zstd.EncoderLevelFromString(compressorLevel) + if !ok { + return nil, "", fmt.Errorf("invalid zstd compressor level: %s", compressorLevel) + } + } + } + + wc, err = zstd.NewWriter(w, zstd.WithEncoderLevel(level)) + default: + return nil, "", fmt.Errorf("unknown compressor type: %s", compressorType) + } + + return wc, compressorType, err +} + +// FullVersion properly combines version and release fields to a version string +func (r *RPM) FullVersion() string { + if r.Release != "" { + return fmt.Sprintf("%s-%s", r.Version, r.Release) + } + + return r.Version +} + +// AllowListDirs removes all directories which are not explicitly allowlisted. +func (r *RPM) AllowListDirs(allowList map[string]bool) { + for fn, ff := range r.files { + if ff.Mode&0o40000 == 0o40000 { + if !allowList[fn] { + delete(r.files, fn) + } + } + } +} + +// Write closes the rpm and writes the whole rpm to an io.Writer +func (r *RPM) Write(w io.Writer) error { + if r.closed { + return ErrWriteAfterClose + } + // Add all of the files, sorted alphabetically. + fnames := []string{} + for fn := range r.files { + fnames = append(fnames, fn) + } + sort.Strings(fnames) + for _, fn := range fnames { + if err := r.writeFile(r.files[fn]); err != nil { + return errors.Wrapf(err, "failed to write file %q", fn) + } + } + if err := r.cpio.Close(); err != nil { + return errors.Wrap(err, "failed to close cpio payload") + } + if err := r.compressedPayload.Close(); err != nil { + return errors.Wrap(err, "failed to close gzip payload") + } + + if _, err := w.Write(lead(r.Name, r.FullVersion())); err != nil { + return errors.Wrap(err, "failed to write lead") + } + // Write the regular header. + h := newIndex(immutable) + r.writeGenIndexes(h) + + // do not write file indexes if there are no files (meta package) + // doing so will result in an invalid package + if (len(r.files)) > 0 { + r.writeFileIndexes(h) + } + + if err := r.writeRelationIndexes(h); err != nil { + return err + } + // CustomTags must be the last to be added, because they can overwrite values. 
+ h.AddEntries(r.customTags) + hb, err := h.Bytes() + if err != nil { + return errors.Wrap(err, "failed to retrieve header") + } + // Write the signatures + s := newIndex(signatures) + if err := r.writeSignatures(s, hb); err != nil { + return errors.Wrap(err, "failed to create signatures") + } + + s.AddEntries(r.customSigs) + sb, err := s.Bytes() + if err != nil { + return errors.Wrap(err, "failed to retrieve signatures header") + } + + if _, err := w.Write(sb); err != nil { + return errors.Wrap(err, "failed to write signature bytes") + } + // Signatures are padded to 8-byte boundaries + if _, err := w.Write(make([]byte, (8-len(sb)%8)%8)); err != nil { + return errors.Wrap(err, "failed to write signature padding") + } + if _, err := w.Write(hb); err != nil { + return errors.Wrap(err, "failed to write header body") + } + _, err = w.Write(r.payload.Bytes()) + return errors.Wrap(err, "failed to write payload") +} + +// SetPGPSigner registers a function that will accept the header and payload as bytes, +// and return a signature as bytes. The function should simulate what gpg does, +// probably by using golang.org/x/crypto/openpgp or by forking a gpg process. +func (r *RPM) SetPGPSigner(f func([]byte) ([]byte, error)) { + r.pgpSigner = f +} + +// Only call this after the payload and header were written. +func (r *RPM) writeSignatures(sigHeader *index, regHeader []byte) error { + sigHeader.Add(sigSize, EntryInt32([]int32{int32(r.payload.Len() + len(regHeader))})) + sigHeader.Add(sigSHA256, EntryString(fmt.Sprintf("%x", sha256.Sum256(regHeader)))) + sigHeader.Add(sigPayloadSize, EntryInt32([]int32{int32(r.payloadSize)})) + if r.pgpSigner != nil { + // For sha 256 you need to sign the header and payload separately + header := append([]byte{}, regHeader...) + headerSig, err := r.pgpSigner(header) + if err != nil { + return errors.Wrap(err, "call to signer failed") + } + sigHeader.Add(sigRSA, EntryBytes(headerSig)) + + body := append(header, r.payload.Bytes()...) + bodySig, err := r.pgpSigner(body) + if err != nil { + return errors.Wrap(err, "call to signer failed") + } + sigHeader.Add(sigPGP, EntryBytes(bodySig)) + } + return nil +} + +func (r *RPM) writeRelationIndexes(h *index) error { + // add all relation categories + if err := r.Provides.AddToIndex(h, tagProvides, tagProvideVersion, tagProvideFlags); err != nil { + return errors.Wrap(err, "failed to add provides") + } + if err := r.Obsoletes.AddToIndex(h, tagObsoletes, tagObsoleteVersion, tagObsoleteFlags); err != nil { + return errors.Wrap(err, "failed to add obsoletes") + } + if err := r.Suggests.AddToIndex(h, tagSuggests, tagSuggestVersion, tagSuggestFlags); err != nil { + return errors.Wrap(err, "failed to add suggests") + } + if err := r.Recommends.AddToIndex(h, tagRecommends, tagRecommendVersion, tagRecommendFlags); err != nil { + return errors.Wrap(err, "failed to add recommends") + } + if err := r.Requires.AddToIndex(h, tagRequires, tagRequireVersion, tagRequireFlags); err != nil { + return errors.Wrap(err, "failed to add requires") + } + if err := r.Conflicts.AddToIndex(h, tagConflicts, tagConflictVersion, tagConflictFlags); err != nil { + return errors.Wrap(err, "failed to add conflicts") + } + + return nil +} + +// AddCustomTag adds or overwrites a tag value in the index. +func (r *RPM) AddCustomTag(tag int, e IndexEntry) { + r.customTags[tag] = e +} + +// AddCustomSig adds or overwrites a signature tag value. 
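+// A usage sketch (illustrative only, not from upstream: the tag is the
+// in-package sigPGP constant and the bytes are assumed to be precomputed):
+//
+//	r.AddCustomSig(sigPGP, EntryBytes(precomputedSig))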
+func (r *RPM) AddCustomSig(tag int, e IndexEntry) { + r.customSigs[tag] = e +} + +func (r *RPM) writeGenIndexes(h *index) { + h.Add(tagHeaderI18NTable, EntryString("C")) + h.Add(tagSize, EntryInt32([]int32{int32(r.payloadSize)})) + h.Add(tagName, EntryString(r.Name)) + h.Add(tagVersion, EntryString(r.Version)) + h.Add(tagEpoch, EntryUint32([]uint32{r.Epoch})) + h.Add(tagSummary, EntryString(r.Summary)) + h.Add(tagDescription, EntryString(r.Description)) + h.Add(tagBuildHost, EntryString(r.BuildHost)) + if !r.BuildTime.IsZero() { + // time.Time zero value is confusing, avoid if not supplied + // see https://github.com/google/rpmpack/issues/43 + h.Add(tagBuildTime, EntryInt32([]int32{int32(r.BuildTime.Unix())})) + } + h.Add(tagRelease, EntryString(r.Release)) + h.Add(tagPayloadFormat, EntryString("cpio")) + h.Add(tagPayloadCompressor, EntryString(r.Compressor)) + h.Add(tagPayloadFlags, EntryString("9")) + h.Add(tagArch, EntryString(r.Arch)) + h.Add(tagOS, EntryString(r.OS)) + h.Add(tagVendor, EntryString(r.Vendor)) + h.Add(tagLicence, EntryString(r.Licence)) + h.Add(tagPackager, EntryString(r.Packager)) + h.Add(tagGroup, EntryString(r.Group)) + h.Add(tagURL, EntryString(r.URL)) + h.Add(tagPayloadDigest, EntryStringSlice([]string{fmt.Sprintf("%x", sha256.Sum256(r.payload.Bytes()))})) + h.Add(tagPayloadDigestAlgo, EntryInt32([]int32{hashAlgoSHA256})) + + // rpm utilities look for the sourcerpm tag to deduce if this is not a source rpm (if it has a sourcerpm, + // it is NOT a source rpm). + h.Add(tagSourceRPM, EntryString(fmt.Sprintf("%s-%s.src.rpm", r.Name, r.FullVersion()))) + if r.pretrans != "" { + h.Add(tagPretrans, EntryString(r.pretrans)) + h.Add(tagPretransProg, EntryString("/bin/sh")) + } + if r.prein != "" { + h.Add(tagPrein, EntryString(r.prein)) + h.Add(tagPreinProg, EntryString("/bin/sh")) + } + if r.postin != "" { + h.Add(tagPostin, EntryString(r.postin)) + h.Add(tagPostinProg, EntryString("/bin/sh")) + } + if r.preun != "" { + h.Add(tagPreun, EntryString(r.preun)) + h.Add(tagPreunProg, EntryString("/bin/sh")) + } + if r.postun != "" { + h.Add(tagPostun, EntryString(r.postun)) + h.Add(tagPostunProg, EntryString("/bin/sh")) + } + if r.posttrans != "" { + h.Add(tagPosttrans, EntryString(r.posttrans)) + h.Add(tagPosttransProg, EntryString("/bin/sh")) + } +} + +// WriteFileIndexes writes file related index headers to the header +func (r *RPM) writeFileIndexes(h *index) { + h.Add(tagBasenames, EntryStringSlice(r.basenames)) + h.Add(tagDirindexes, EntryUint32(r.dirindexes)) + h.Add(tagDirnames, EntryStringSlice(r.di.AllDirs())) + h.Add(tagFileSizes, EntryUint32(r.filesizes)) + h.Add(tagFileModes, EntryUint16(r.filemodes)) + h.Add(tagFileUserName, EntryStringSlice(r.fileowners)) + h.Add(tagFileGroupName, EntryStringSlice(r.filegroups)) + h.Add(tagFileMTimes, EntryUint32(r.filemtimes)) + h.Add(tagFileDigests, EntryStringSlice(r.filedigests)) + h.Add(tagFileLinkTos, EntryStringSlice(r.filelinktos)) + h.Add(tagFileFlags, EntryUint32(r.fileflags)) + + inodes := make([]int32, len(r.dirindexes)) + digestAlgo := make([]int32, len(r.dirindexes)) + verifyFlags := make([]int32, len(r.dirindexes)) + fileRDevs := make([]int16, len(r.dirindexes)) + fileLangs := make([]string, len(r.dirindexes)) + + for ii := range inodes { + // is inodes just a range from 1..len(dirindexes)? 
maybe different with hard links + inodes[ii] = int32(ii + 1) + digestAlgo[ii] = hashAlgoSHA256 + // With regular files, it seems like we can always enable all of the verify flags + verifyFlags[ii] = int32(-1) + fileRDevs[ii] = int16(1) + } + h.Add(tagFileINodes, EntryInt32(inodes)) + h.Add(tagFileDigestAlgo, EntryInt32(digestAlgo)) + h.Add(tagFileVerifyFlags, EntryInt32(verifyFlags)) + h.Add(tagFileRDevs, EntryInt16(fileRDevs)) + h.Add(tagFileLangs, EntryStringSlice(fileLangs)) +} + +// AddPretrans adds a pretrans scriptlet +func (r *RPM) AddPretrans(s string) { + r.pretrans = s +} + +// AddPrein adds a prein scriptlet +func (r *RPM) AddPrein(s string) { + r.prein = s +} + +// AddPostin adds a postin scriptlet +func (r *RPM) AddPostin(s string) { + r.postin = s +} + +// AddPreun adds a preun scriptlet +func (r *RPM) AddPreun(s string) { + r.preun = s +} + +// AddPostun adds a postun scriptlet +func (r *RPM) AddPostun(s string) { + r.postun = s +} + +// AddPosttrans adds a posttrans scriptlet +func (r *RPM) AddPosttrans(s string) { + r.posttrans = s +} + +// AddFile adds an RPMFile to an existing rpm. +func (r *RPM) AddFile(f RPMFile) { + if f.Name == "/" { // rpm does not allow the root dir to be included. + return + } + r.files[f.Name] = f +} + +// writeFile writes the file to the indexes and cpio. +func (r *RPM) writeFile(f RPMFile) error { + dir, file := path.Split(f.Name) + r.dirindexes = append(r.dirindexes, r.di.Get(dir)) + r.basenames = append(r.basenames, file) + r.fileowners = append(r.fileowners, f.Owner) + r.filegroups = append(r.filegroups, f.Group) + r.filemtimes = append(r.filemtimes, f.MTime) + r.fileflags = append(r.fileflags, uint32(f.Type)) + + links := 1 + switch { + case f.Mode&0o40000 != 0: // directory + r.filesizes = append(r.filesizes, 4096) + r.filedigests = append(r.filedigests, "") + r.filelinktos = append(r.filelinktos, "") + links = 2 + case f.Mode&0o120000 == 0o120000: // symlink + r.filesizes = append(r.filesizes, uint32(len(f.Body))) + r.filedigests = append(r.filedigests, "") + r.filelinktos = append(r.filelinktos, string(f.Body)) + default: // regular file + f.Mode = f.Mode | 0o100000 + r.filesizes = append(r.filesizes, uint32(len(f.Body))) + r.filedigests = append(r.filedigests, fmt.Sprintf("%x", sha256.Sum256(f.Body))) + r.filelinktos = append(r.filelinktos, "") + } + r.filemodes = append(r.filemodes, uint16(f.Mode)) + + // Ghost files have no payload + if f.Type == GhostFile { + return nil + } + return r.writePayload(f, links) +} + +func (r *RPM) writePayload(f RPMFile, links int) error { + hdr := &cpio.Header{ + Name: f.Name, + Mode: cpio.FileMode(f.Mode), + Size: int64(len(f.Body)), + Links: links, + } + if err := r.cpio.WriteHeader(hdr); err != nil { + return errors.Wrap(err, "failed to write payload file header") + } + if _, err := r.cpio.Write(f.Body); err != nil { + return errors.Wrap(err, "failed to write payload file content") + } + r.payloadSize += uint(len(f.Body)) + return nil +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/sense.go b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/sense.go new file mode 100644 index 0000000000..cf97cad0ab --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/sense.go @@ -0,0 +1,168 @@ +package rpmpack + +import ( + "fmt" + "regexp" + "strings" +) + +type rpmSense uint32 + +// https://github.com/rpm-software-management/rpm/blob/ab01b5eacf9ec6a07a5d9e1991ef476a12d264fd/include/rpm/rpmds.h#L27 +// SenseAny (0) specifies no specific version compare +// SenseLess 
(2) specifies less than the specified version
+// SenseGreater (4) specifies greater than the specified version
+// SenseEqual (8) specifies equal to the specified version
+const (
+	SenseAny  rpmSense = 0
+	SenseLess          = 1 << iota
+	SenseGreater
+	SenseEqual
+	SenseRPMLIB rpmSense = 1 << 24
+)
+
+var relationMatch = regexp.MustCompile(`([^=<>\s]*)\s*((?:=|>|<)*)\s*(.*)?`)
+
+// Relation is the structure of rpm sense relationships
+type Relation struct {
+	Name    string
+	Version string
+	Sense   rpmSense
+}
+
+// String returns the string representation of the Relation
+func (r *Relation) String() string {
+	return fmt.Sprintf("%s%v%s", r.Name, r.Sense, r.Version)
+}
+
+// Equal compares two relations for equality
+func (r *Relation) Equal(o *Relation) bool {
+	return r.Name == o.Name && r.Version == o.Version && r.Sense == o.Sense
+}
+
+// Relations is a slice of Relation pointers
+type Relations []*Relation
+
+// String returns the string representation of the Relations
+func (r *Relations) String() string {
+	var val []string
+	for _, rel := range *r {
+		val = append(val, rel.String())
+	}
+	return strings.Join(val, ",")
+}
+
+// Set parses a string into a Relation and appends it to the Relations slice
+// if it is missing; this is used by the flag package
+func (r *Relations) Set(value string) error {
+	relation, err := NewRelation(value)
+	if err != nil {
+		return err
+	}
+	r.addIfMissing(relation)
+
+	return nil
+}
+
+func (r *Relations) addIfMissing(value *Relation) {
+	for _, relation := range *r {
+		if relation.Equal(value) {
+			return
+		}
+	}
+
+	*r = append(*r, value)
+}
+
+// AddToIndex adds the relations to the specified category on the index
+func (r *Relations) AddToIndex(h *index, nameTag, versionTag, flagsTag int) error {
+	var (
+		num      = len(*r)
+		names    = make([]string, num)
+		versions = make([]string, num)
+		flags    = make([]uint32, num)
+	)
+
+	if num == 0 {
+		return nil
+	}
+
+	for idx, relation := range *r {
+		names[idx] = relation.Name
+		versions[idx] = relation.Version
+		flags[idx] = uint32(relation.Sense)
+	}
+
+	h.Add(nameTag, EntryStringSlice(names))
+	h.Add(versionTag, EntryStringSlice(versions))
+	h.Add(flagsTag, EntryUint32(flags))
+
+	return nil
+}
+
+// NewRelation parses a string into a Relation
+func NewRelation(related string) (*Relation, error) {
+	var (
+		err   error
+		sense rpmSense
+		name,
+		version string
+	)
+
+	if strings.HasPrefix(related, "(") && strings.HasSuffix(related, ")") {
+		// This is a `rich` dependency which must be parsed at install time
+		// https://rpm-software-management.github.io/rpm/manual/boolean_dependencies.html
+		sense = SenseAny
+		name = related
+	} else {
+		parts := relationMatch.FindStringSubmatch(related)
+		if sense, err = parseSense(parts[2]); err != nil {
+			return nil, err
+		}
+		name = parts[1]
+		version = parts[3]
+	}
+
+	return &Relation{
+		Name:    name,
+		Version: version,
+		Sense:   sense,
+	}, nil
+}
+
+var stringToSense = map[string]rpmSense{
+	"":   SenseAny,
+	"<":  SenseLess,
+	">":  SenseGreater,
+	"=":  SenseEqual,
+	"<=": SenseLess | SenseEqual,
+	">=": SenseGreater | SenseEqual,
+}
+
+// String returns the string representation of the rpmSense
+func (r rpmSense) String() string {
+	var (
+		val rpmSense
+		ret string
+	)
+
+	for ret, val = range stringToSense {
+		if r == val {
+			return ret
+		}
+	}
+
+	return "unknown"
+}
+
+func parseSense(sense string) (rpmSense, error) {
+	var (
+		ret rpmSense
+		ok  bool
+	)
+	if ret, ok = stringToSense[sense]; !ok {
+		return SenseAny, fmt.Errorf("unknown sense value: %s", sense)
+	}
+
+	return ret,
nil
+}
diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/tags.go b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/tags.go
new file mode 100644
index 0000000000..4359929812
--- /dev/null
+++ b/vendor/github.com/goreleaser/nfpm/v2/internal/rpmpack/tags.go
@@ -0,0 +1,101 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpmpack
+
+// Define only tags which we actually use
+// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmtag.h
+const (
+	tagHeaderI18NTable = 0x64 // 100
+	// Signature tags are obviously overlapping regular header tags.
+	sigRSA         = 0x010c // 256
+	sigSHA256      = 0x0111 // 273
+	sigSize        = 0x03e8 // 1000
+	sigPGP         = 0x03ea // 1002
+	sigPayloadSize = 0x03ef // 1007
+
+	// https://github.com/rpm-software-management/rpm/blob/92eadae94c48928bca90693ad63c46ceda37d81f/rpmio/rpmpgp.h#L258
+	hashAlgoSHA256 = 0x0008 // 8
+
+	tagName        = 0x03e8 // 1000
+	tagVersion     = 0x03e9 // 1001
+	tagRelease     = 0x03ea // 1002
+	tagEpoch       = 0x03eb // 1003
+	tagSummary     = 0x03ec // 1004
+	tagDescription = 0x03ed // 1005
+	tagBuildTime   = 0x03ee // 1006
+	tagBuildHost   = 0x03ef // 1007
+	tagSize        = 0x03f1 // 1009
+	tagVendor      = 0x03f3 // 1011
+	tagLicence     = 0x03f6 // 1014
+	tagPackager    = 0x03f7 // 1015
+	tagGroup       = 0x03f8 // 1016
+	tagURL         = 0x03fc // 1020
+	tagOS          = 0x03fd // 1021
+	tagArch        = 0x03fe // 1022
+
+	tagPrein  = 0x03ff // 1023
+	tagPostin = 0x0400 // 1024
+	tagPreun  = 0x0401 // 1025
+	tagPostun = 0x0402 // 1026
+
+	tagFileSizes         = 0x0404 // 1028
+	tagFileModes         = 0x0406 // 1030
+	tagFileRDevs         = 0x0409 // 1033
+	tagFileMTimes        = 0x040a // 1034
+	tagFileDigests       = 0x040b // 1035
+	tagFileLinkTos       = 0x040c // 1036
+	tagFileFlags         = 0x040d // 1037
+	tagFileUserName      = 0x040f // 1039
+	tagFileGroupName     = 0x0410 // 1040
+	tagSourceRPM         = 0x0414 // 1044
+	tagFileVerifyFlags   = 0x0415 // 1045
+	tagProvides          = 0x0417 // 1047
+	tagRequireFlags      = 0x0418 // 1048
+	tagRequires          = 0x0419 // 1049
+	tagRequireVersion    = 0x041a // 1050
+	tagConflictFlags     = 0x041d // 1053
+	tagConflicts         = 0x041e // 1054
+	tagConflictVersion   = 0x041f // 1055
+	tagPreinProg         = 0x043d // 1085
+	tagPostinProg        = 0x043e // 1086
+	tagPreunProg         = 0x043f // 1087
+	tagPostunProg        = 0x0440 // 1088
+	tagObsoletes         = 0x0442 // 1090
+	tagFileINodes        = 0x0448 // 1096
+	tagFileLangs         = 0x0449 // 1097
+	tagProvideFlags      = 0x0458 // 1112
+	tagProvideVersion    = 0x0459 // 1113
+	tagObsoleteFlags     = 0x045a // 1114
+	tagObsoleteVersion   = 0x045b // 1115
+	tagDirindexes        = 0x045c // 1116
+	tagBasenames         = 0x045d // 1117
+	tagDirnames          = 0x045e // 1118
+	tagPayloadFormat     = 0x0464 // 1124
+	tagPayloadCompressor = 0x0465 // 1125
+	tagPayloadFlags      = 0x0466 // 1126
+	tagPretrans          = 0x047f // 1151
+	tagPosttrans         = 0x0480 // 1152
+	tagPretransProg      = 0x0481 // 1153
+	tagPosttransProg     = 0x0482 // 1154
+	tagFileDigestAlgo    = 0x1393 // 5011
+	tagRecommends        = 0x13b6 // 5046
+	tagRecommendVersion  = 0x13b7 // 5047
+	tagRecommendFlags    = 0x13b8 // 5048
+	tagSuggests          = 0x13b9 // 5049
+	tagSuggestVersion    = 0x13ba //
5050 + tagSuggestFlags = 0x13bb // 5051 + tagPayloadDigest = 0x13e4 // 5092 + tagPayloadDigestAlgo = 0x13e5 // 5093 +) diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/sign/pgp.go b/vendor/github.com/goreleaser/nfpm/v2/internal/sign/pgp.go new file mode 100644 index 0000000000..0f6d7ea283 --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/sign/pgp.go @@ -0,0 +1,261 @@ +package sign + +import ( + "bytes" + "crypto" + "errors" + "fmt" + "io" + "os" + "strconv" + "unicode" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/clearsign" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/goreleaser/nfpm/v2" +) + +// PGPSigner returns a PGP signer that creates a detached non-ASCII-armored +// signature and is compatible with rpmpack's signature API. +func PGPSigner(keyFile, passphrase string) func([]byte) ([]byte, error) { + return PGPSignerWithKeyID(keyFile, passphrase, nil) +} + +// PGPSignerWithKeyID returns a PGP signer that creates a detached non-ASCII-armored +// signature and is compatible with rpmpack's signature API. +func PGPSignerWithKeyID(keyFile, passphrase string, hexKeyId *string) func([]byte) ([]byte, error) { + return func(data []byte) ([]byte, error) { + keyId, err := parseKeyID(hexKeyId) + if err != nil { + return nil, fmt.Errorf("%v is not a valid key id: %w", hexKeyId, err) + } + + key, err := readSigningKey(keyFile, passphrase) + if err != nil { + return nil, &nfpm.ErrSigningFailure{Err: err} + } + + var signature bytes.Buffer + + err = openpgp.DetachSign(&signature, key, bytes.NewReader(data), &packet.Config{ + SigningKeyId: keyId, + DefaultHash: crypto.SHA256, + }) + if err != nil { + return nil, &nfpm.ErrSigningFailure{Err: err} + } + + return signature.Bytes(), nil + } +} + +// PGPArmoredDetachSign creates an ASCII-armored detached signature. +func PGPArmoredDetachSign(message io.Reader, keyFile, passphrase string) ([]byte, error) { + return PGPArmoredDetachSignWithKeyID(message, keyFile, passphrase, nil) +} + +// PGPArmoredDetachSignWithKeyID creates an ASCII-armored detached signature. 
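+// A hedged usage sketch (the key file path is a placeholder; passing a nil
+// key id selects the default signing key):
+//
+//	sig, err := PGPArmoredDetachSignWithKeyID(bytes.NewReader(data), "key.asc", passphrase, nil)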
+func PGPArmoredDetachSignWithKeyID(message io.Reader, keyFile, passphrase string, hexKeyId *string) ([]byte, error) {
+	keyId, err := parseKeyID(hexKeyId)
+	if err != nil {
+		return nil, fmt.Errorf("%v is not a valid key id: %w", hexKeyId, err)
+	}
+
+	key, err := readSigningKey(keyFile, passphrase)
+	if err != nil {
+		return nil, fmt.Errorf("armored detach sign: %w", err)
+	}
+
+	var signature bytes.Buffer
+
+	err = openpgp.ArmoredDetachSign(&signature, key, message, &packet.Config{
+		SigningKeyId: keyId,
+		DefaultHash:  crypto.SHA256,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("armored detach sign: %w", err)
+	}
+
+	return signature.Bytes(), nil
+}
+
+func PGPClearSignWithKeyID(message io.Reader, keyFile, passphrase string, hexKeyId *string) ([]byte, error) {
+	keyId, err := parseKeyID(hexKeyId)
+	if err != nil {
+		return nil, fmt.Errorf("%v is not a valid key id: %w", hexKeyId, err)
+	}
+
+	key, err := readSigningKey(keyFile, passphrase)
+	if err != nil {
+		return nil, fmt.Errorf("clear sign: %w", err)
+	}
+
+	var signature bytes.Buffer
+
+	writeCloser, err := clearsign.Encode(&signature, key.PrivateKey, &packet.Config{
+		SigningKeyId: keyId,
+		DefaultHash:  crypto.SHA256,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("clear sign: %w", err)
+	}
+
+	if _, err := io.Copy(writeCloser, message); err != nil {
+		return nil, fmt.Errorf("clear sign: %w", err)
+	}
+
+	if err := writeCloser.Close(); err != nil {
+		return nil, fmt.Errorf("clear sign: %w", err)
+	}
+
+	return signature.Bytes(), nil
+}
+
+// PGPVerify is exported for use in tests and verifies an ASCII-armored or non-ASCII-armored
+// signature using an ASCII-armored or non-ASCII-armored public key file. The signer
+// identity is not explicitly checked, other than the obvious fact that the signer's key must
+// be in the armoredPubKeyFile.
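+// Illustrative call, with placeholder file names:
+//
+//	err := PGPVerify(bytes.NewReader(pkg), sig, "pubkey.asc")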
+func PGPVerify(message io.Reader, signature []byte, armoredPubKeyFile string) error { + keyFileContent, err := os.ReadFile(armoredPubKeyFile) + if err != nil { + return fmt.Errorf("reading armored public key file: %w", err) + } + + var keyring openpgp.EntityList + + if isASCII(keyFileContent) { + keyring, err = openpgp.ReadArmoredKeyRing(bytes.NewReader(keyFileContent)) + if err != nil { + return fmt.Errorf("decoding armored public key file: %w", err) + } + } else { + keyring, err = openpgp.ReadKeyRing(bytes.NewReader(keyFileContent)) + if err != nil { + return fmt.Errorf("decoding public key file: %w", err) + } + } + + if isASCII(signature) { + _, err = openpgp.CheckArmoredDetachedSignature(keyring, message, bytes.NewReader(signature), nil) + return err + } + + _, err = openpgp.CheckDetachedSignature(keyring, message, bytes.NewReader(signature), nil) + return err +} + +func PGPReadMessage(message []byte, armoredPubKeyFile string) error { + keyFileContent, err := os.ReadFile(armoredPubKeyFile) + if err != nil { + return fmt.Errorf("reading armored public key file: %w", err) + } + + var keyring openpgp.EntityList + + if isASCII(keyFileContent) { + keyring, err = openpgp.ReadArmoredKeyRing(bytes.NewReader(keyFileContent)) + if err != nil { + return fmt.Errorf("decoding armored public key file: %w", err) + } + } else { + keyring, err = openpgp.ReadKeyRing(bytes.NewReader(keyFileContent)) + if err != nil { + return fmt.Errorf("decoding public key file: %w", err) + } + } + + block, _ := clearsign.Decode(message) + _, err = block.VerifySignature(keyring, nil) + + return err +} + +func parseKeyID(hexKeyId *string) (uint64, error) { + if hexKeyId == nil || *hexKeyId == "" { + return 0, nil + } + + result, err := strconv.ParseUint(*hexKeyId, 16, 64) + if err != nil { + return 0, err + } + return result, nil +} + +var ( + errMoreThanOneKey = errors.New("more than one signing key in keyring") + errNoKeys = errors.New("no signing key in keyring") + errNoPassword = errors.New("key is encrypted but no passphrase was provided") +) + +func readSigningKey(keyFile, passphrase string) (*openpgp.Entity, error) { + fileContent, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("reading PGP key file: %w", err) + } + + var entityList openpgp.EntityList + + if isASCII(fileContent) { + entityList, err = openpgp.ReadArmoredKeyRing(bytes.NewReader(fileContent)) + if err != nil { + return nil, fmt.Errorf("decoding armored PGP keyring: %w", err) + } + } else { + entityList, err = openpgp.ReadKeyRing(bytes.NewReader(fileContent)) + if err != nil { + return nil, fmt.Errorf("decoding PGP keyring: %w", err) + } + } + var key *openpgp.Entity + + for _, candidate := range entityList { + if candidate.PrivateKey == nil { + continue + } + + if !candidate.PrivateKey.CanSign() { + continue + } + + if key != nil { + return nil, errMoreThanOneKey + } + + key = candidate + } + + if key == nil { + return nil, errNoKeys + } + + if key.PrivateKey.Encrypted { + if passphrase == "" { + return nil, errNoPassword + } + pw := []byte(passphrase) + err = key.PrivateKey.Decrypt(pw) + if err != nil { + return nil, fmt.Errorf("decrypt secret signing key: %w", err) + } + for _, sub := range key.Subkeys { + if sub.PrivateKey != nil { + if err := sub.PrivateKey.Decrypt(pw); err != nil { + return nil, fmt.Errorf("gopenpgp: error in unlocking sub key: %w", err) + } + } + } + } + + return key, nil +} + +func isASCII(s []byte) bool { + for i := 0; i < len(s); i++ { + if s[i] > unicode.MaxASCII { + return false + } + } + return 
true +} diff --git a/vendor/github.com/goreleaser/nfpm/v2/internal/sign/rsa.go b/vendor/github.com/goreleaser/nfpm/v2/internal/sign/rsa.go new file mode 100644 index 0000000000..6dd60e824a --- /dev/null +++ b/vendor/github.com/goreleaser/nfpm/v2/internal/sign/rsa.go @@ -0,0 +1,122 @@ +package sign + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" // nolint:gosec + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io" + "os" +) + +var ( + errNoPemBlock = errors.New("no PEM block found") + errDigestNotSH1 = errors.New("digest is not a SHA1 hash") + errNoPassphrase = errors.New("key is encrypted but no passphrase was provided") + errNoRSAKey = errors.New("key is not an RSA key") +) + +// RSASignSHA1Digest signs the provided SHA1 message digest. The key file +// must be in the PEM format and can either be encrypted or not. +func RSASignSHA1Digest(sha1Digest []byte, keyFile, passphrase string) ([]byte, error) { + if len(sha1Digest) != sha1.Size { + return nil, errDigestNotSH1 + } + + keyFileContent, err := os.ReadFile(keyFile) + if err != nil { + return nil, fmt.Errorf("reading key file: %w", err) + } + + block, _ := pem.Decode(keyFileContent) + if block == nil { + return nil, errNoPemBlock + } + + blockData := block.Bytes + if x509.IsEncryptedPEMBlock(block) { //nolint:staticcheck + if passphrase == "" { + return nil, errNoPassphrase + } + + var decryptedBlockData []byte + + decryptedBlockData, err = x509.DecryptPEMBlock(block, []byte(passphrase)) //nolint:staticcheck + if err != nil { + return nil, fmt.Errorf("decrypt private key PEM block: %w", err) + } + + blockData = decryptedBlockData + } + + priv, err := x509.ParsePKCS1PrivateKey(blockData) + if err != nil { + return nil, fmt.Errorf("parse PKCS1 private key: %w", err) + } + + signature, err := priv.Sign(rand.Reader, sha1Digest, crypto.SHA1) + if err != nil { + return nil, fmt.Errorf("signing: %w", err) + } + + return signature, nil +} + +func rsaSign(message io.Reader, keyFile, passphrase string) ([]byte, error) { + sha1Hash := sha1.New() // nolint:gosec + _, err := io.Copy(sha1Hash, message) + if err != nil { + return nil, fmt.Errorf("create SHA1 message digest: %w", err) + } + + return RSASignSHA1Digest(sha1Hash.Sum(nil), keyFile, passphrase) +} + +// RSAVerifySHA1Digest is exported for use in tests and verifies a signature over the +// provided SHA1 hash of a message. The key file must be in the PEM format. 
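+// Illustrative call (paths are placeholders; the digest must be a raw SHA1 sum):
+//
+//	digest := sha1.Sum(message)
+//	err := RSAVerifySHA1Digest(digest[:], signature, "public.pem")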
+func RSAVerifySHA1Digest(sha1Digest, signature []byte, publicKeyFile string) error {
+	if len(sha1Digest) != sha1.Size {
+		return errDigestNotSH1
+	}
+
+	keyFileContent, err := os.ReadFile(publicKeyFile)
+	if err != nil {
+		return fmt.Errorf("reading key file: %w", err)
+	}
+
+	block, _ := pem.Decode(keyFileContent)
+	if block == nil {
+		return errNoPemBlock
+	}
+
+	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+	if err != nil {
+		return fmt.Errorf("parse PKIX public key: %w", err)
+	}
+
+	rsaPub, ok := pub.(*rsa.PublicKey)
+	if !ok {
+		return errNoRSAKey
+	}
+
+	err = rsa.VerifyPKCS1v15(rsaPub, crypto.SHA1, sha1Digest, signature)
+	if err != nil {
+		return fmt.Errorf("verify PKCS1v15 signature: %w", err)
+	}
+
+	return nil
+}
+
+func rsaVerify(message io.Reader, signature []byte, publicKeyFile string) error {
+	sha1Hash := sha1.New() // nolint:gosec
+	_, err := io.Copy(sha1Hash, message)
+	if err != nil {
+		return fmt.Errorf("create SHA1 message digest: %w", err)
+	}
+
+	return RSAVerifySHA1Digest(sha1Hash.Sum(nil), signature, publicKeyFile)
+}
diff --git a/vendor/github.com/goreleaser/nfpm/v2/nfpm.go b/vendor/github.com/goreleaser/nfpm/v2/nfpm.go
new file mode 100644
index 0000000000..233141806f
--- /dev/null
+++ b/vendor/github.com/goreleaser/nfpm/v2/nfpm.go
@@ -0,0 +1,489 @@
+// Package nfpm provides ways to package programs in some linux packaging
+// formats.
+package nfpm
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"strings"
+	"sync"
+
+	"github.com/AlekSi/pointer"
+	"github.com/Masterminds/semver/v3"
+	"github.com/goreleaser/chglog"
+	"github.com/goreleaser/nfpm/v2/files"
+	"github.com/imdario/mergo"
+	"gopkg.in/yaml.v3"
+)
+
+// nolint: gochecknoglobals
+var (
+	packagers = map[string]Packager{}
+	lock      sync.Mutex
+)
+
+// RegisterPackager registers a new packager for the given format.
+func RegisterPackager(format string, p Packager) {
+	lock.Lock()
+	defer lock.Unlock()
+	packagers[format] = p
+}
+
+// ClearPackagers clears all registered packagers; used for testing.
+func ClearPackagers() {
+	lock.Lock()
+	defer lock.Unlock()
+	packagers = map[string]Packager{}
+}
+
+// ErrNoPackager happens when no packager is registered for the given format.
+type ErrNoPackager struct {
+	format string
+}
+
+func (e ErrNoPackager) Error() string {
+	return fmt.Sprintf("no packager registered for the format %s", e.format)
+}
+
+// Get returns a packager for the given format.
+func Get(format string) (Packager, error) {
+	p, ok := packagers[format]
+	if !ok {
+		return nil, ErrNoPackager{format}
+	}
+	return p, nil
+}
+
+// Parse decodes YAML data from an io.Reader into a configuration struct.
+func Parse(in io.Reader) (config Config, err error) {
+	return ParseWithEnvMapping(in, os.Getenv)
+}
+
+// ParseWithEnvMapping decodes YAML data from an io.Reader into a configuration struct.
+func ParseWithEnvMapping(in io.Reader, mapping func(string) string) (config Config, err error) {
+	dec := yaml.NewDecoder(in)
+	dec.KnownFields(true)
+	if err = dec.Decode(&config); err != nil {
+		return
+	}
+	config.envMappingFunc = mapping
+	if config.envMappingFunc == nil {
+		config.envMappingFunc = func(s string) string { return s }
+	}
+
+	config.expandEnvVars()
+
+	WithDefaults(&config.Info)
+
+	return config, nil
+}
+
+// ParseFile decodes YAML data from a file path into a configuration struct.
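+// Sketch of typical use, with a placeholder path:
+//
+//	cfg, err := ParseFile("nfpm.yaml")
+//	if err != nil {
+//		return err
+//	}
+//	info, err := cfg.Get("rpm") // format-specific overrides applied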
+func ParseFile(path string) (config Config, err error) { + if path == "-" { + return ParseWithEnvMapping(os.Stdin, os.Getenv) + } + return ParseFileWithEnvMapping(path, os.Getenv) +} + +// ParseFileWithEnvMapping decodes YAML data from a file path into a configuration struct. +func ParseFileWithEnvMapping(path string, mapping func(string) string) (config Config, err error) { + var file *os.File + file, err = os.Open(path) //nolint:gosec + if err != nil { + return + } + defer file.Close() // nolint: errcheck,gosec + return ParseWithEnvMapping(file, mapping) +} + +// Packager represents any packager implementation. +type Packager interface { + Package(info *Info, w io.Writer) error + ConventionalFileName(info *Info) string +} + +type PackagerWithExtension interface { + Packager + ConventionalExtension() string +} + +// Config contains the top level configuration for packages. +type Config struct { + Info `yaml:",inline" json:",inline"` + Overrides map[string]*Overridables `yaml:"overrides,omitempty" json:"overrides,omitempty" jsonschema:"title=overrides,description=override some fields when packaging with a specific packager,enum=apk,enum=deb,enum=rpm"` + envMappingFunc func(string) string +} + +// Get returns the Info struct for the given packager format. Overrides +// for the given format are merged into the final struct. +func (c *Config) Get(format string) (info *Info, err error) { + info = &Info{} + // make a deep copy of info + if err = mergo.Merge(info, c.Info); err != nil { + return nil, fmt.Errorf("failed to merge config into info: %w", err) + } + override, ok := c.Overrides[format] + if !ok { + // no overrides + return info, nil + } + if err = mergo.Merge(&info.Overridables, override, mergo.WithOverride); err != nil { + return nil, fmt.Errorf("failed to merge overrides into info: %w", err) + } + var contents []*files.Content + for _, f := range info.Contents { + if f.Packager == format || f.Packager == "" { + contents = append(contents, f) + } + } + info.Contents = contents + return info, nil +} + +// Validate ensures that the config is well typed. +func (c *Config) Validate() error { + if err := Validate(&c.Info); err != nil { + return err + } + for format := range c.Overrides { + if _, err := Get(format); err != nil { + return err + } + } + return nil +} + +func (c *Config) expandEnvVarsStringSlice(items []string) []string { + for i, dep := range items { + val := strings.TrimSpace(os.Expand(dep, c.envMappingFunc)) + items[i] = val + } + for i := 0; i < len(items); i++ { + if items[i] == "" { + items = append(items[:i], items[i+1:]...) 
+ i-- // Since we just deleted items[i], we must redo that index + } + } + + return items +} + +func (c *Config) expandEnvVars() { + // Version related fields + c.Info.Release = os.Expand(c.Info.Release, c.envMappingFunc) + c.Info.Version = os.Expand(c.Info.Version, c.envMappingFunc) + c.Info.Prerelease = os.Expand(c.Info.Prerelease, c.envMappingFunc) + c.Info.Platform = os.Expand(c.Info.Platform, c.envMappingFunc) + c.Info.Arch = os.Expand(c.Info.Arch, c.envMappingFunc) + for or := range c.Overrides { + c.Overrides[or].Conflicts = c.expandEnvVarsStringSlice(c.Overrides[or].Conflicts) + c.Overrides[or].Depends = c.expandEnvVarsStringSlice(c.Overrides[or].Depends) + c.Overrides[or].Replaces = c.expandEnvVarsStringSlice(c.Overrides[or].Replaces) + c.Overrides[or].Recommends = c.expandEnvVarsStringSlice(c.Overrides[or].Recommends) + c.Overrides[or].Provides = c.expandEnvVarsStringSlice(c.Overrides[or].Provides) + c.Overrides[or].Suggests = c.expandEnvVarsStringSlice(c.Overrides[or].Suggests) + } + c.Info.Conflicts = c.expandEnvVarsStringSlice(c.Info.Conflicts) + c.Info.Depends = c.expandEnvVarsStringSlice(c.Info.Depends) + c.Info.Replaces = c.expandEnvVarsStringSlice(c.Info.Replaces) + c.Info.Recommends = c.expandEnvVarsStringSlice(c.Info.Recommends) + c.Info.Provides = c.expandEnvVarsStringSlice(c.Info.Provides) + c.Info.Suggests = c.expandEnvVarsStringSlice(c.Info.Suggests) + + // Maintainer and vendor fields + c.Info.Name = os.Expand(c.Info.Name, c.envMappingFunc) + c.Info.Maintainer = os.Expand(c.Info.Maintainer, c.envMappingFunc) + c.Info.Vendor = os.Expand(c.Info.Vendor, c.envMappingFunc) + + // Package signing related fields + c.Info.Deb.Signature.KeyFile = os.Expand(c.Deb.Signature.KeyFile, c.envMappingFunc) + c.Info.RPM.Signature.KeyFile = os.Expand(c.RPM.Signature.KeyFile, c.envMappingFunc) + c.Info.APK.Signature.KeyFile = os.Expand(c.APK.Signature.KeyFile, c.envMappingFunc) + c.Info.Deb.Signature.KeyID = pointer.ToString(os.Expand(pointer.GetString(c.Deb.Signature.KeyID), c.envMappingFunc)) + c.Info.RPM.Signature.KeyID = pointer.ToString(os.Expand(pointer.GetString(c.RPM.Signature.KeyID), c.envMappingFunc)) + c.Info.APK.Signature.KeyID = pointer.ToString(os.Expand(pointer.GetString(c.APK.Signature.KeyID), c.envMappingFunc)) + + // Package signing passphrase + generalPassphrase := os.Expand("$NFPM_PASSPHRASE", c.envMappingFunc) + c.Info.Deb.Signature.KeyPassphrase = generalPassphrase + c.Info.RPM.Signature.KeyPassphrase = generalPassphrase + c.Info.APK.Signature.KeyPassphrase = generalPassphrase + + debPassphrase := os.Expand("$NFPM_DEB_PASSPHRASE", c.envMappingFunc) + if debPassphrase != "" { + c.Info.Deb.Signature.KeyPassphrase = debPassphrase + } + + rpmPassphrase := os.Expand("$NFPM_RPM_PASSPHRASE", c.envMappingFunc) + if rpmPassphrase != "" { + c.Info.RPM.Signature.KeyPassphrase = rpmPassphrase + } + + apkPassphrase := os.Expand("$NFPM_APK_PASSPHRASE", c.envMappingFunc) + if apkPassphrase != "" { + c.Info.APK.Signature.KeyPassphrase = apkPassphrase + } + + // RPM specific + c.Info.RPM.Packager = os.Expand(c.RPM.Packager, c.envMappingFunc) +} + +// Info contains information about a single package. 
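+// A minimal sketch of building one programmatically (all field values are
+// placeholders):
+//
+//	info := WithDefaults(&Info{
+//		Name:    "example",
+//		Arch:    "amd64",
+//		Version: "1.0.0",
+//	})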
+type Info struct { + Overridables `yaml:",inline" json:",inline"` + Name string `yaml:"name" json:"name" jsonschema:"title=package name"` + Arch string `yaml:"arch" json:"arch" jsonschema:"title=target architecture,example=amd64"` + Platform string `yaml:"platform,omitempty" json:"platform,omitempty" jsonschema:"title=target platform,example=linux,default=linux"` + Epoch string `yaml:"epoch,omitempty" json:"epoch,omitempty" jsonschema:"title=version epoch,example=2,default=extracted from version"` + Version string `yaml:"version" json:"version" jsonschema:"title=version,example=v1.0.2,example=2.0.1"` + VersionSchema string `yaml:"version_schema,omitempty" json:"version_schema,omitempty" jsonschema:"title=version schema,enum=semver,enum=none,default=semver"` + Release string `yaml:"release,omitempty" json:"release,omitempty" jsonschema:"title=version release,example=1"` + Prerelease string `yaml:"prerelease,omitempty" json:"prerelease,omitempty" jsonschema:"title=version prerelease,default=extracted from version"` + VersionMetadata string `yaml:"version_metadata,omitempty" json:"version_metadata,omitempty" jsonschema:"title=version metadata,example=git"` + Section string `yaml:"section,omitempty" json:"section,omitempty" jsonschema:"title=package section,example=default"` + Priority string `yaml:"priority,omitempty" json:"priority,omitempty" jsonschema:"title=package priority,example=extra"` + Maintainer string `yaml:"maintainer,omitempty" json:"maintainer,omitempty" jsonschema:"title=package maintainer,example=me@example.com"` + Description string `yaml:"description,omitempty" json:"description,omitempty" jsonschema:"title=package description"` + Vendor string `yaml:"vendor,omitempty" json:"vendor,omitempty" jsonschema:"title=package vendor,example=MyCorp"` + Homepage string `yaml:"homepage,omitempty" json:"homepage,omitempty" jsonschema:"title=package homepage,example=https://example.com"` + License string `yaml:"license,omitempty" json:"license,omitempty" jsonschema:"title=package license,example=MIT"` + Changelog string `yaml:"changelog,omitempty" json:"changelog,omitempty" jsonschema:"title=package changelog,example=changelog.yaml,description=see https://github.com/goreleaser/chglog for more details"` + DisableGlobbing bool `yaml:"disable_globbing,omitempty" json:"disable_globbing,omitempty" jsonschema:"title=whether to disable file globbing,default=false"` + Target string `yaml:"-" json:"-"` +} + +func (i *Info) Validate() error { + return Validate(i) +} + +// GetChangeLog parses the provided changelog file. +func (i *Info) GetChangeLog() (log *chglog.PackageChangeLog, err error) { + // if the file does not exist chglog.Parse will just silently + // create an empty changelog but we should notify the user instead + if _, err = os.Stat(i.Changelog); errors.Is(err, fs.ErrNotExist) { + return nil, err + } + + entries, err := chglog.Parse(i.Changelog) + if err != nil { + return nil, err + } + + return &chglog.PackageChangeLog{ + Name: i.Name, + Entries: entries, + }, nil +} + +func (i *Info) parseSemver() { + // parse the version as a semver so we can properly split the parts + // and support proper ordering for both rpm and deb + if v, err := semver.NewVersion(i.Version); err == nil { + i.Version = fmt.Sprintf("%d.%d.%d", v.Major(), v.Minor(), v.Patch()) + if i.Prerelease == "" { + i.Prerelease = v.Prerelease() + } + + if i.VersionMetadata == "" { + i.VersionMetadata = v.Metadata() + } + } +} + +// Overridables contain the field which are overridable in a package. 
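+// In the YAML configuration these fields can appear both at the top level and
+// under a per-packager override, e.g. (sketch):
+//
+//	overrides:
+//	  rpm:
+//	    depends:
+//	      - systemd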
+type Overridables struct { + Replaces []string `yaml:"replaces,omitempty" json:"replaces,omitempty" jsonschema:"title=replaces directive,example=nfpm"` + Provides []string `yaml:"provides,omitempty" json:"provides,omitempty" jsonschema:"title=provides directive,example=nfpm"` + Depends []string `yaml:"depends,omitempty" json:"depends,omitempty" jsonschema:"title=depends directive,example=nfpm"` + Recommends []string `yaml:"recommends,omitempty" json:"recommends,omitempty" jsonschema:"title=recommends directive,example=nfpm"` + Suggests []string `yaml:"suggests,omitempty" json:"suggests,omitempty" jsonschema:"title=suggests directive,example=nfpm"` + Conflicts []string `yaml:"conflicts,omitempty" json:"conflicts,omitempty" jsonschema:"title=conflicts directive,example=nfpm"` + Contents files.Contents `yaml:"contents,omitempty" json:"contents,omitempty" jsonschema:"title=files to add to the package"` + Scripts Scripts `yaml:"scripts,omitempty" json:"scripts,omitempty" jsonschema:"title=scripts to execute"` + RPM RPM `yaml:"rpm,omitempty" json:"rpm,omitempty" jsonschema:"title=rpm-specific settings"` + Deb Deb `yaml:"deb,omitempty" json:"deb,omitempty" jsonschema:"title=deb-specific settings"` + APK APK `yaml:"apk,omitempty" json:"apk,omitempty" jsonschema:"title=apk-specific settings"` + ArchLinux ArchLinux `yaml:"archlinux,omitempty" json:"archlinux,omitempty" jsonschema:"title=archlinux-specific settings"` +} + +type ArchLinux struct { + Pkgbase string `yaml:"pkgbase,omitempty" json:"pkgbase,omitempty" jsonschema:"title=explicitly specify the name used to refer to a split package, defaults to name"` + Arch string `yaml:"arch,omitempty" json:"arch,omitempty" jsonschema:"title=architecture in archlinux nomenclature"` + Packager string `yaml:"packager,omitempty" json:"packager,omitempty" jsonschema:"title=organization that packaged the software"` + Scripts ArchLinuxScripts `yaml:"scripts,omitempty" json:"scripts,omitempty" jsonschema:"title=archlinux-specific scripts"` +} + +type ArchLinuxScripts struct { + PreUpgrade string `yaml:"preupgrade,omitempty" json:"preupgrade,omitempty" jsonschema:"title=preupgrade script"` + PostUpgrade string `yaml:"postupgrade,omitempty" json:"postupgrade,omitempty" jsonschema:"title=postupgrade script"` +} + +// RPM is custom configs that are only available on RPM packages. +type RPM struct { + Arch string `yaml:"arch,omitempty" json:"arch,omitempty" jsonschema:"title=architecture in rpm nomenclature"` + Scripts RPMScripts `yaml:"scripts,omitempty" json:"scripts,omitempty" jsonschema:"title=rpm-specific scripts"` + Group string `yaml:"group,omitempty" json:"group,omitempty" jsonschema:"title=package group,example=Unspecified"` + Summary string `yaml:"summary,omitempty" json:"summary,omitempty" jsonschema:"title=package summary"` + Compression string `yaml:"compression,omitempty" json:"compression,omitempty" jsonschema:"title=compression algorithm to be used,enum=gzip,enum=lzma,enum=xz,default=gzip:-1"` + Signature RPMSignature `yaml:"signature,omitempty" json:"signature,omitempty" jsonschema:"title=rpm signature"` + Packager string `yaml:"packager,omitempty" json:"packager,omitempty" jsonschema:"title=organization that actually packaged the software"` +} + +// RPMScripts represents scripts only available on RPM packages. 
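+// YAML sketch (script paths are placeholders):
+//
+//	rpm:
+//	  scripts:
+//	    pretrans: ./scripts/pretrans.sh
+//	    posttrans: ./scripts/posttrans.sh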
+type RPMScripts struct { + PreTrans string `yaml:"pretrans,omitempty" json:"pretrans,omitempty" jsonschema:"title=pretrans script"` + PostTrans string `yaml:"posttrans,omitempty" json:"posttrans,omitempty" jsonschema:"title=posttrans script"` +} + +type PackageSignature struct { + // PGP secret key, can be ASCII-armored + KeyFile string `yaml:"key_file,omitempty" json:"key_file,omitempty" jsonschema:"title=key file,example=key.gpg"` + KeyID *string `yaml:"key_id,omitempty" json:"key_id,omitempty" jsonschema:"title=key id,example=bc8acdd415bd80b3"` + KeyPassphrase string `yaml:"-" json:"-"` // populated from environment variable +} + +type RPMSignature struct { + PackageSignature `yaml:",inline" json:",inline"` +} + +type APK struct { + Arch string `yaml:"arch,omitempty" json:"arch,omitempty" jsonschema:"title=architecture in apk nomenclature"` + Signature APKSignature `yaml:"signature,omitempty" json:"signature,omitempty" jsonschema:"title=apk signature"` + Scripts APKScripts `yaml:"scripts,omitempty" json:"scripts,omitempty" jsonschema:"title=apk scripts"` +} + +type APKSignature struct { + PackageSignature `yaml:",inline" json:",inline"` + // defaults to .rsa.pub + KeyName string `yaml:"key_name,omitempty" json:"key_name,omitempty" jsonschema:"title=key name,example=origin,default=maintainer_email.rsa.pub"` +} + +type APKScripts struct { + PreUpgrade string `yaml:"preupgrade,omitempty" json:"preupgrade,omitempty" jsonschema:"title=pre upgrade script"` + PostUpgrade string `yaml:"postupgrade,omitempty" json:"postupgrade,omitempty" jsonschema:"title=post upgrade script"` +} + +// Deb is custom configs that are only available on deb packages. +type Deb struct { + Arch string `yaml:"arch,omitempty" json:"arch,omitempty" jsonschema:"title=architecture in deb nomenclature"` + Scripts DebScripts `yaml:"scripts,omitempty" json:"scripts,omitempty" jsonschema:"title=scripts"` + Triggers DebTriggers `yaml:"triggers,omitempty" json:"triggers,omitempty" jsonschema:"title=triggers"` + Breaks []string `yaml:"breaks,omitempty" json:"breaks,omitempty" jsonschema:"title=breaks"` + Signature DebSignature `yaml:"signature,omitempty" json:"signature,omitempty" jsonschema:"title=signature"` + Compression string `yaml:"compression,omitempty" json:"compression,omitempty" jsonschema:"title=compression algorithm to be used,enum=gzip,enum=xz,enum=none,default=gzip"` + Fields map[string]string `yaml:"fields,omitempty" json:"fields,omitempty" jsonschema:"title=fields"` +} + +type DebSignature struct { + PackageSignature `yaml:",inline" json:",inline"` + // debsign, or dpkg-sig (defaults to debsign) + Method string `yaml:"method,omitempty" json:"method,omitempty" jsonschema:"title=method role,enum=debsign,enum=dpkg-sig,default=debsign"` + // origin, maint or archive (defaults to origin) + Type string `yaml:"type,omitempty" json:"type,omitempty" jsonschema:"title=signer role,enum=origin,enum=maint,enum=archive,default=origin"` + Signer string `yaml:"signer,omitempty" json:"signer,omitempty" jsonschema:"title=signer"` +} + +// DebTriggers contains triggers only available for deb packages. 
+// https://wiki.debian.org/DpkgTriggers +// https://man7.org/linux/man-pages/man5/deb-triggers.5.html +type DebTriggers struct { + Interest []string `yaml:"interest,omitempty" json:"interest,omitempty" jsonschema:"title=interest"` + InterestAwait []string `yaml:"interest_await,omitempty" json:"interest_await,omitempty" jsonschema:"title=interest await"` + InterestNoAwait []string `yaml:"interest_noawait,omitempty" json:"interest_noawait,omitempty" jsonschema:"title=interest noawait"` + Activate []string `yaml:"activate,omitempty" json:"activate,omitempty" jsonschema:"title=activate"` + ActivateAwait []string `yaml:"activate_await,omitempty" json:"activate_await,omitempty" jsonschema:"title=activate await"` + ActivateNoAwait []string `yaml:"activate_noawait,omitempty" json:"activate_noawait,omitempty" jsonschema:"title=activate noawait"` +} + +// DebScripts is scripts only available on deb packages. +type DebScripts struct { + Rules string `yaml:"rules,omitempty" json:"rules,omitempty" jsonschema:"title=rules"` + Templates string `yaml:"templates,omitempty" json:"templates,omitempty" jsonschema:"title=templates"` + Config string `yaml:"config,omitempty" json:"config,omitempty" jsonschema:"title=config"` +} + +// Scripts contains information about maintainer scripts for packages. +type Scripts struct { + PreInstall string `yaml:"preinstall,omitempty" json:"preinstall,omitempty" jsonschema:"title=pre install"` + PostInstall string `yaml:"postinstall,omitempty" json:"postinstall,omitempty" jsonschema:"title=post install"` + PreRemove string `yaml:"preremove,omitempty" json:"preremove,omitempty" jsonschema:"title=pre remove"` + PostRemove string `yaml:"postremove,omitempty" json:"postremove,omitempty" jsonschema:"title=post remove"` +} + +// ErrFieldEmpty happens when some required field is empty. +type ErrFieldEmpty struct { + field string +} + +func (e ErrFieldEmpty) Error() string { + return fmt.Sprintf("package %s must be provided", e.field) +} + +// Validate the given Info and returns an error if it is invalid. +func Validate(info *Info) (err error) { + if info.Name == "" { + return ErrFieldEmpty{"name"} + } + if info.Arch == "" && (info.Deb.Arch == "" || info.RPM.Arch == "" || info.APK.Arch == "") { + return ErrFieldEmpty{"arch"} + } + if info.Version == "" { + return ErrFieldEmpty{"version"} + } + + contents, err := files.ExpandContentGlobs(info.Contents, info.DisableGlobbing) + if err != nil { + return err + } + + info.Contents = contents + return nil +} + +// WithDefaults set some sane defaults into the given Info. +func WithDefaults(info *Info) *Info { + if info.Platform == "" { + info.Platform = "linux" + } + if info.Description == "" { + info.Description = "no description given" + } + if info.Arch == "" { + info.Arch = "amd64" + } + if info.Version == "" { + info.Version = "v0.0.0-rc0" + } + + switch info.VersionSchema { + case "none": + // No change to the version or prerelease info set in the YAML file + break + case "semver": + fallthrough + default: + info.parseSemver() + } + + return info +} + +// ErrSigningFailure is returned whenever something went wrong during +// the package signing process. The underlying error can be unwrapped +// and could be crypto-related or something that occurred while adding +// the signature to the package. 
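+// Callers can detect it with errors.As, e.g. (sketch):
+//
+//	var sigErr *ErrSigningFailure
+//	if errors.As(err, &sigErr) {
+//		log.Println(sigErr.Err)
+//	}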
+type ErrSigningFailure struct {
+	Err error
+}
+
+func (s *ErrSigningFailure) Error() string {
+	return fmt.Sprintf("signing error: %v", s.Err)
+}
+
+func (s *ErrSigningFailure) Unwrap() error {
+	return s.Err
+}
diff --git a/vendor/github.com/goreleaser/nfpm/v2/rpm/rpm.go b/vendor/github.com/goreleaser/nfpm/v2/rpm/rpm.go
new file mode 100644
index 0000000000..d9559baf8c
--- /dev/null
+++ b/vendor/github.com/goreleaser/nfpm/v2/rpm/rpm.go
@@ -0,0 +1,414 @@
+// Package rpm implements nfpm.Packager providing .rpm bindings using
+// google/rpmpack.
+package rpm
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/goreleaser/chglog"
+	"github.com/goreleaser/nfpm/v2"
+	"github.com/goreleaser/nfpm/v2/files"
+	"github.com/goreleaser/nfpm/v2/internal/rpmpack"
+	"github.com/goreleaser/nfpm/v2/internal/sign"
+)
+
+const (
+	// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmtag.h#L152
+	tagChangelogTime = 1080
+	// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmtag.h#L153
+	tagChangelogName = 1081
+	// https://github.com/rpm-software-management/rpm/blob/master/lib/rpmtag.h#L154
+	tagChangelogText = 1082
+
+	// Symbolic link
+	tagLink = 0o120000
+	// Directory
+	tagDirectory = 0o40000
+
+	changelogNotesTemplate = `
+{{- range .Changes }}{{$note := splitList "\n" .Note}}
+- {{ first $note }}
+{{- range $i,$n := (rest $note) }}{{- if ne (trim $n) ""}}
+{{$n}}{{end}}
+{{- end}}{{- end}}`
+)
+
+const packagerName = "rpm"
+
+// nolint: gochecknoinits
+func init() {
+	nfpm.RegisterPackager(packagerName, Default)
+}
+
+// Default RPM packager.
+// nolint: gochecknoglobals
+var Default = &RPM{}
+
+// RPM is an RPM packager implementation.
+type RPM struct{}
+
+// https://docs.fedoraproject.org/ro/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch01s03.html
+// nolint: gochecknoglobals
+var archToRPM = map[string]string{
+	"all":    "noarch",
+	"amd64":  "x86_64",
+	"386":    "i386",
+	"arm64":  "aarch64",
+	"arm5":   "armv5tel",
+	"arm6":   "armv6hl",
+	"arm7":   "armv7hl",
+	"mips":   "mips",
+	"mipsle": "mipsel",
+	// TODO: other arches
+}
+
+func ensureValidArch(info *nfpm.Info) *nfpm.Info {
+	if info.RPM.Arch != "" {
+		info.Arch = info.RPM.Arch
+	} else if arch, ok := archToRPM[info.Arch]; ok {
+		info.Arch = arch
+	}
+
+	return info
+}
+
+// ConventionalFileName returns a file name according
+// to the conventions for RPM packages. See:
+// http://ftp.rpm.org/max-rpm/ch-rpm-file-format.html
+func (*RPM) ConventionalFileName(info *nfpm.Info) string {
+	info = ensureValidArch(info)
+
+	version := formatVersion(info)
+	if info.Release != "" {
+		version += "-" + info.Release
+	}
+
+	// name-version-release.architecture.rpm
+	return fmt.Sprintf("%s-%s.%s.rpm", info.Name, version, info.Arch)
+}
+
+// ConventionalExtension returns the file name conventionally used for RPM packages
+func (*RPM) ConventionalExtension() string {
+	return ".rpm"
+}
+
+// Package writes a new RPM package to the given writer using the given info.
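+//
+// A minimal caller sketch (hypothetical; "info" is assumed to be a populated
+// nfpm.Info):
+//
+//	var buf bytes.Buffer
+//	if err := rpm.Default.Package(info, &buf); err != nil {
+//		// handle packaging or signing errors
+//	}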
+func (*RPM) Package(info *nfpm.Info, w io.Writer) (err error) {
+	var (
+		meta *rpmpack.RPMMetaData
+		rpm  *rpmpack.RPM
+	)
+	info = ensureValidArch(info)
+	if err = info.Validate(); err != nil {
+		return err
+	}
+
+	if meta, err = buildRPMMeta(info); err != nil {
+		return err
+	}
+	if rpm, err = rpmpack.NewRPM(*meta); err != nil {
+		return err
+	}
+
+	if info.RPM.Signature.KeyFile != "" {
+		rpm.SetPGPSigner(sign.PGPSignerWithKeyID(info.RPM.Signature.KeyFile, info.RPM.Signature.KeyPassphrase, info.RPM.Signature.KeyID))
+	}
+
+	if err = createFilesInsideRPM(info, rpm); err != nil {
+		return err
+	}
+
+	if err = addScriptFiles(info, rpm); err != nil {
+		return err
+	}
+
+	if info.Changelog != "" {
+		if err = addChangeLog(info, rpm); err != nil {
+			return err
+		}
+	}
+
+	if err = rpm.Write(w); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addChangeLog(info *nfpm.Info, rpm *rpmpack.RPM) error {
+	changelog, err := info.GetChangeLog()
+	if err != nil {
+		return fmt.Errorf("reading changelog: %w", err)
+	}
+
+	if len(changelog.Entries) == 0 {
+		// do nothing because creating empty tags
+		// would result in an invalid package
+		return nil
+	}
+
+	tpl, err := chglog.LoadTemplateData(changelogNotesTemplate)
+	if err != nil {
+		return fmt.Errorf("parsing RPM changelog template: %w", err)
+	}
+
+	changes := make([]string, len(changelog.Entries))
+	titles := make([]string, len(changelog.Entries))
+	times := make([]uint32, len(changelog.Entries))
+	for idx, entry := range changelog.Entries {
+		var formattedNotes bytes.Buffer
+
+		err := tpl.Execute(&formattedNotes, entry)
+		if err != nil {
+			return fmt.Errorf("formatting changelog notes: %w", err)
+		}
+
+		changes[idx] = strings.TrimSpace(formattedNotes.String())
+		times[idx] = uint32(entry.Date.Unix())
+		titles[idx] = fmt.Sprintf("%s - %s", entry.Packager, entry.Semver)
+	}
+
+	rpm.AddCustomTag(tagChangelogTime, rpmpack.EntryUint32(times))
+	rpm.AddCustomTag(tagChangelogName, rpmpack.EntryStringSlice(titles))
+	rpm.AddCustomTag(tagChangelogText, rpmpack.EntryStringSlice(changes))
+
+	return nil
+}
+
+//nolint:funlen
+func buildRPMMeta(info *nfpm.Info) (*rpmpack.RPMMetaData, error) {
+	var (
+		err   error
+		epoch uint64
+		provides,
+		depends,
+		recommends,
+		replaces,
+		suggests,
+		conflicts rpmpack.Relations
+	)
+	if info.RPM.Compression == "" {
+		info.RPM.Compression = "gzip:-1"
+	}
+	if epoch, err = strconv.ParseUint(defaultTo(info.Epoch, "0"), 10, 32); err != nil {
+		return nil, err
+	}
+	if provides, err = toRelation(info.Provides); err != nil {
+		return nil, err
+	}
+	if depends, err = toRelation(info.Depends); err != nil {
+		return nil, err
+	}
+	if recommends, err = toRelation(info.Recommends); err != nil {
+		return nil, err
+	}
+	if replaces, err = toRelation(info.Replaces); err != nil {
+		return nil, err
+	}
+	if suggests, err = toRelation(info.Suggests); err != nil {
+		return nil, err
+	}
+	if conflicts, err = toRelation(info.Conflicts); err != nil {
+		return nil, err
+	}
+
+	hostname, err := os.Hostname()
+	if err != nil {
+		return nil, err
+	}
+
+	return &rpmpack.RPMMetaData{
+		Name:        info.Name,
+		Summary:     defaultTo(info.RPM.Summary, strings.Split(info.Description, "\n")[0]),
+		Description: info.Description,
+		Version:     formatVersion(info),
+		Release:     defaultTo(info.Release, "1"),
+		Epoch:       uint32(epoch),
+		Arch:        info.Arch,
+		OS:          info.Platform,
+		Licence:     info.License,
+		URL:         info.Homepage,
+		Vendor:      info.Vendor,
+		Packager:    defaultTo(info.RPM.Packager, info.Maintainer),
+		Group:       info.RPM.Group,
+		Provides:    provides,
+		Recommends:
recommends, + Requires: depends, + Obsoletes: replaces, + Suggests: suggests, + Conflicts: conflicts, + Compressor: info.RPM.Compression, + BuildTime: time.Now(), + BuildHost: hostname, + }, nil +} + +func formatVersion(info *nfpm.Info) string { + version := info.Version + + if info.Prerelease != "" { + version += "~" + info.Prerelease + } + + if info.VersionMetadata != "" { + version += "+" + info.VersionMetadata + } + + return version +} + +func defaultTo(in, def string) string { + if in == "" { + return def + } + return in +} + +func toRelation(items []string) (rpmpack.Relations, error) { + relations := make(rpmpack.Relations, 0) + for idx := range items { + if err := relations.Set(items[idx]); err != nil { + return nil, err + } + } + + return relations, nil +} + +func addScriptFiles(info *nfpm.Info, rpm *rpmpack.RPM) error { + if info.RPM.Scripts.PreTrans != "" { + data, err := os.ReadFile(info.RPM.Scripts.PreTrans) + if err != nil { + return err + } + rpm.AddPretrans(string(data)) + } + if info.Scripts.PreInstall != "" { + data, err := os.ReadFile(info.Scripts.PreInstall) + if err != nil { + return err + } + rpm.AddPrein(string(data)) + } + + if info.Scripts.PreRemove != "" { + data, err := os.ReadFile(info.Scripts.PreRemove) + if err != nil { + return err + } + rpm.AddPreun(string(data)) + } + + if info.Scripts.PostInstall != "" { + data, err := os.ReadFile(info.Scripts.PostInstall) + if err != nil { + return err + } + rpm.AddPostin(string(data)) + } + + if info.Scripts.PostRemove != "" { + data, err := os.ReadFile(info.Scripts.PostRemove) + if err != nil { + return err + } + rpm.AddPostun(string(data)) + } + + if info.RPM.Scripts.PostTrans != "" { + data, err := os.ReadFile(info.RPM.Scripts.PostTrans) + if err != nil { + return err + } + rpm.AddPosttrans(string(data)) + } + + return nil +} + +func createFilesInsideRPM(info *nfpm.Info, rpm *rpmpack.RPM) (err error) { + for _, content := range info.Contents { + if content.Packager != "" && content.Packager != packagerName { + continue + } + + var file *rpmpack.RPMFile + + switch content.Type { + case "config": + file, err = asRPMFile(content, rpmpack.ConfigFile) + case "config|noreplace": + file, err = asRPMFile(content, rpmpack.ConfigFile|rpmpack.NoReplaceFile) + case "ghost": + if content.FileInfo.Mode == 0 { + content.FileInfo.Mode = os.FileMode(0o644) + } + + file, err = asRPMFile(content, rpmpack.GhostFile) + case "doc": + file, err = asRPMFile(content, rpmpack.DocFile) + case "licence", "license": + file, err = asRPMFile(content, rpmpack.LicenceFile) + case "readme": + file, err = asRPMFile(content, rpmpack.ReadmeFile) + case "symlink": + file = asRPMSymlink(content) + case "dir": + file, err = asRPMDirectory(content) + default: + file, err = asRPMFile(content, rpmpack.GenericFile) + } + + if err != nil { + return err + } + + rpm.AddFile(*file) + } + + return nil +} + +func asRPMDirectory(content *files.Content) (*rpmpack.RPMFile, error) { + return &rpmpack.RPMFile{ + Name: content.Destination, + Mode: uint(content.Mode()) | tagDirectory, + MTime: uint32(time.Now().Unix()), + Owner: content.FileInfo.Owner, + Group: content.FileInfo.Group, + }, nil +} + +func asRPMSymlink(content *files.Content) *rpmpack.RPMFile { + return &rpmpack.RPMFile{ + Name: content.Destination, + Body: []byte(content.Source), + Mode: uint(tagLink), + MTime: uint32(content.FileInfo.MTime.Unix()), + Owner: content.FileInfo.Owner, + Group: content.FileInfo.Group, + } +} + +func asRPMFile(content *files.Content, fileType rpmpack.FileType) (*rpmpack.RPMFile, 
error) {
+	data, err := os.ReadFile(content.Source)
+	if err != nil && content.Type != "ghost" {
+		return nil, err
+	}
+
+	return &rpmpack.RPMFile{
+		Name:  content.Destination,
+		Body:  data,
+		Mode:  uint(content.FileInfo.Mode),
+		MTime: uint32(content.FileInfo.MTime.Unix()),
+		Owner: content.FileInfo.Owner,
+		Group: content.FileInfo.Group,
+		Type:  fileType,
+	}, nil
+}
diff --git a/vendor/github.com/iancoleman/orderedmap/LICENSE b/vendor/github.com/iancoleman/orderedmap/LICENSE
new file mode 100644
index 0000000000..2732e3795b
--- /dev/null
+++ b/vendor/github.com/iancoleman/orderedmap/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Ian Coleman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/iancoleman/orderedmap/orderedmap.go b/vendor/github.com/iancoleman/orderedmap/orderedmap.go
new file mode 100644
index 0000000000..656e354f90
--- /dev/null
+++ b/vendor/github.com/iancoleman/orderedmap/orderedmap.go
@@ -0,0 +1,329 @@
+package orderedmap
+
+import (
+	"encoding/json"
+	"errors"
+	"sort"
+	"strings"
+)
+
+var NoValueError = errors.New("No value for this key")
+
+type KeyIndex struct {
+	Key   string
+	Index int
+}
+type ByIndex []KeyIndex
+
+func (a ByIndex) Len() int           { return len(a) }
+func (a ByIndex) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a ByIndex) Less(i, j int) bool { return a[i].Index < a[j].Index }
+
+type Pair struct {
+	key   string
+	value interface{}
+}
+
+func (kv *Pair) Key() string {
+	return kv.key
+}
+
+func (kv *Pair) Value() interface{} {
+	return kv.value
+}
+
+type ByPair struct {
+	Pairs    []*Pair
+	LessFunc func(a *Pair, j *Pair) bool
+}
+
+func (a ByPair) Len() int           { return len(a.Pairs) }
+func (a ByPair) Swap(i, j int)      { a.Pairs[i], a.Pairs[j] = a.Pairs[j], a.Pairs[i] }
+func (a ByPair) Less(i, j int) bool { return a.LessFunc(a.Pairs[i], a.Pairs[j]) }
+
+type OrderedMap struct {
+	keys   []string
+	values map[string]interface{}
+}
+
+func New() *OrderedMap {
+	o := OrderedMap{}
+	o.keys = []string{}
+	o.values = map[string]interface{}{}
+	return &o
+}
+
+func (o *OrderedMap) Get(key string) (interface{}, bool) {
+	val, exists := o.values[key]
+	return val, exists
+}
+
+func (o *OrderedMap) Set(key string, value interface{}) {
+	_, exists := o.values[key]
+	if !exists {
+		o.keys = append(o.keys, key)
+	}
+	o.values[key] = value
+}
+
+func (o *OrderedMap) Delete(key string) {
+	// check key is in use
+	_, ok := o.values[key]
+	if !ok {
+		return
+	}
+	// remove from keys
+	for i, k := range o.keys {
+		if k == key {
+			o.keys =
append(o.keys[:i], o.keys[i+1:]...) + break + } + } + // remove from values + delete(o.values, key) +} + +func (o *OrderedMap) Keys() []string { + return o.keys +} + +// SortKeys Sort the map keys using your sort func +func (o *OrderedMap) SortKeys(sortFunc func(keys []string)) { + sortFunc(o.keys) +} + +// Sort Sort the map using your sort func +func (o *OrderedMap) Sort(lessFunc func(a *Pair, b *Pair) bool) { + pairs := make([]*Pair, len(o.keys)) + for i, key := range o.keys { + pairs[i] = &Pair{key, o.values[key]} + } + + sort.Sort(ByPair{pairs, lessFunc}) + + for i, pair := range pairs { + o.keys[i] = pair.key + } +} + +func (o *OrderedMap) UnmarshalJSON(b []byte) error { + if o.values == nil { + o.values = map[string]interface{}{} + } + var err error + err = mapStringToOrderedMap(string(b), o) + if err != nil { + return err + } + return nil +} + +func mapStringToOrderedMap(s string, o *OrderedMap) error { + // parse string into map + m := map[string]interface{}{} + err := json.Unmarshal([]byte(s), &m) + if err != nil { + return err + } + // Get the order of the keys + orderedKeys := []KeyIndex{} + for k, _ := range m { + kEscaped := strings.Replace(k, `"`, `\"`, -1) + kQuoted := `"` + kEscaped + `"` + // Find how much content exists before this key. + // If all content from this key and after is replaced with a close + // brace, it should still form a valid json string. + sTrimmed := s + for len(sTrimmed) > 0 { + lastIndex := strings.LastIndex(sTrimmed, kQuoted) + if lastIndex == -1 { + break + } + sTrimmed = sTrimmed[0:lastIndex] + sTrimmed = strings.TrimSpace(sTrimmed) + if len(sTrimmed) > 0 && sTrimmed[len(sTrimmed)-1] == ',' { + sTrimmed = sTrimmed[0 : len(sTrimmed)-1] + } + maybeValidJson := sTrimmed + "}" + testMap := map[string]interface{}{} + err := json.Unmarshal([]byte(maybeValidJson), &testMap) + if err == nil { + // record the position of this key in s + ki := KeyIndex{ + Key: k, + Index: len(sTrimmed), + } + orderedKeys = append(orderedKeys, ki) + // shorten the string to get the next key + startOfValueIndex := lastIndex + len(kQuoted) + valueStr := s[startOfValueIndex : len(s)-1] + valueStr = strings.TrimSpace(valueStr) + if len(valueStr) > 0 && valueStr[0] == ':' { + valueStr = valueStr[1:len(valueStr)] + } + valueStr = strings.TrimSpace(valueStr) + if valueStr[0] == '{' { + // if the value for this key is a map + // find end of valueStr by removing everything after last } + // until it forms valid json + hasValidJson := false + i := 1 + for i < len(valueStr) && !hasValidJson { + if valueStr[i] != '}' { + i = i + 1 + continue + } + subTestMap := map[string]interface{}{} + testValue := valueStr[0 : i+1] + err = json.Unmarshal([]byte(testValue), &subTestMap) + if err == nil { + hasValidJson = true + valueStr = testValue + break + } + i = i + 1 + } + // convert to orderedmap + // this may be recursive it values in the map are also maps + if hasValidJson { + newMap := New() + err := mapStringToOrderedMap(valueStr, newMap) + if err != nil { + return err + } + m[k] = *newMap + } + } else if valueStr[0] == '[' { + // if the value for this key is a slice + // find end of valueStr by removing everything after last ] + // until it forms valid json + hasValidJson := false + i := 1 + for i < len(valueStr) && !hasValidJson { + if valueStr[i] != ']' { + i = i + 1 + continue + } + subTestSlice := []interface{}{} + testValue := valueStr[0 : i+1] + err = json.Unmarshal([]byte(testValue), &subTestSlice) + if err == nil { + hasValidJson = true + valueStr = testValue + break + } + i = i + 
1 + } + // convert to slice with any map items converted to + // orderedmaps + // this may be recursive if values in the slice are slices + if hasValidJson { + newSlice := []interface{}{} + err := sliceStringToSliceWithOrderedMaps(valueStr, &newSlice) + if err != nil { + return err + } + m[k] = newSlice + } + } else { + o.Set(k, m[k]) + } + break + } + } + } + // Sort the keys + sort.Sort(ByIndex(orderedKeys)) + // Convert sorted keys to string slice + k := []string{} + for _, ki := range orderedKeys { + k = append(k, ki.Key) + } + // Set the OrderedMap values + o.values = m + o.keys = k + return nil +} + +func sliceStringToSliceWithOrderedMaps(valueStr string, newSlice *[]interface{}) error { + // if the value for this key is a []interface, convert any map items to an orderedmap. + // find end of valueStr by removing everything after last ] + // until it forms valid json + itemsStr := strings.TrimSpace(valueStr) + itemsStr = itemsStr[1 : len(itemsStr)-1] + // get next item in the slice + itemIndex := 0 + startItem := 0 + endItem := 0 + for endItem <= len(itemsStr) { + couldBeItemEnd := false + couldBeItemEnd = couldBeItemEnd || endItem == len(itemsStr) + couldBeItemEnd = couldBeItemEnd || (endItem < len(itemsStr) && itemsStr[endItem] == ',') + if !couldBeItemEnd { + endItem = endItem + 1 + continue + } + // if this substring compiles to json, it's the next item + possibleItemStr := strings.TrimSpace(itemsStr[startItem:endItem]) + var possibleItem interface{} + err := json.Unmarshal([]byte(possibleItemStr), &possibleItem) + if err != nil { + endItem = endItem + 1 + continue + } + if possibleItemStr[0] == '{' { + // if item is map, convert to orderedmap + oo := New() + err := mapStringToOrderedMap(possibleItemStr, oo) + if err != nil { + return err + } + // add new orderedmap item to new slice + slice := *newSlice + slice = append(slice, *oo) + *newSlice = slice + } else if possibleItemStr[0] == '[' { + // if item is slice, convert to slice with orderedmaps + newItem := []interface{}{} + err := sliceStringToSliceWithOrderedMaps(possibleItemStr, &newItem) + if err != nil { + return err + } + // replace original slice item with new slice + slice := *newSlice + slice = append(slice, newItem) + *newSlice = slice + } else { + // any non-slice and non-map item, just add json parsed item + slice := *newSlice + slice = append(slice, possibleItem) + *newSlice = slice + } + // remove this item from itemsStr + startItem = endItem + 1 + endItem = endItem + 1 + itemIndex = itemIndex + 1 + } + return nil +} + +func (o OrderedMap) MarshalJSON() ([]byte, error) { + s := "{" + for _, k := range o.keys { + // add key + kEscaped := strings.Replace(k, `"`, `\"`, -1) + s = s + `"` + kEscaped + `":` + // add value + v := o.values[k] + vBytes, err := json.Marshal(v) + if err != nil { + return []byte{}, err + } + s = s + string(vBytes) + "," + } + if len(o.keys) > 0 { + s = s[0 : len(s)-1] + } + s = s + "}" + return []byte(s), nil +} diff --git a/vendor/github.com/iancoleman/orderedmap/readme.md b/vendor/github.com/iancoleman/orderedmap/readme.md new file mode 100644 index 0000000000..895632e1b1 --- /dev/null +++ b/vendor/github.com/iancoleman/orderedmap/readme.md @@ -0,0 +1,73 @@ +# orderedmap + +A golang data type equivalent to python's collections.OrderedDict + +Retains order of keys in maps + +Can be JSON serialized / deserialized + +# Usage + +```go +package main + +import ( + "encoding/json" + "github.com/iancoleman/orderedmap" +) + +func main() { + + // use New() instead of o := map[string]interface{}{} + o 
:= orderedmap.New()
+
+	// use Set instead of o["a"] = 1
+	o.Set("a", 1)
+
+	// use Get instead of i, ok := o["a"]
+	val, ok := o.Get("a")
+
+	// use Keys instead of for k, v := range o
+	keys := o.Keys()
+	for _, k := range keys {
+		v, _ := o.Get(k)
+	}
+
+	// use o.Delete instead of delete(o, key)
+	o.Delete("a")
+
+	// serialize to a json string using encoding/json
+	bytes, err := json.Marshal(o)
+	prettyBytes, err := json.MarshalIndent(o, "", "  ")
+
+	// deserialize a json string using encoding/json
+	// all maps (including nested maps) will be parsed as orderedmaps
+	s := `{"a": 1}`
+	err = json.Unmarshal([]byte(s), &o)
+
+	// sort the keys
+	o.SortKeys(sort.Strings)
+
+	// sort by Pair
+	o.Sort(func(a *orderedmap.Pair, b *orderedmap.Pair) bool {
+		return a.Value().(float64) < b.Value().(float64)
+	})
+}
+```
+
+# Caveats
+
+* OrderedMap only takes strings for the key, as per [the JSON spec](http://json.org/).
+
+# Tests
+
+```
+go test
+```
+
+# Alternatives
+
+None of the alternatives offer JSON serialization.
+
+* [cevaris/ordered_map](https://github.com/cevaris/ordered_map)
+* [mantyr/iterator](https://github.com/mantyr/iterator)
diff --git a/vendor/github.com/invopop/jsonschema/.golangci.yml b/vendor/github.com/invopop/jsonschema/.golangci.yml
new file mode 100644
index 0000000000..f1922d81ec
--- /dev/null
+++ b/vendor/github.com/invopop/jsonschema/.golangci.yml
@@ -0,0 +1,89 @@
+run:
+  tests: true
+  max-same-issues: 50
+  skip-dirs:
+    - resources
+    - old
+  skip-files:
+    - cmd/protopkg/main.go
+
+output:
+  print-issued-lines: false
+
+linters:
+  enable-all: true
+  disable:
+    - maligned
+    - megacheck
+    - lll
+    - typecheck # `go build` catches this, and it doesn't currently work with Go 1.11 modules
+    - goimports # horrendously slow with go modules :(
+    - dupl # has never been actually useful
+    - gochecknoglobals
+    - gochecknoinits
+    - interfacer # author deprecated it because it provides bad suggestions
+    - funlen
+    - whitespace
+    - godox
+    - wsl
+    - dogsled
+    - gomnd
+    - gocognit
+    - gocyclo
+    - scopelint
+    - godot
+    - nestif
+    - testpackage
+    - goerr113
+    - gci
+    - gofumpt
+    - exhaustivestruct
+    - nlreturn
+    - forbidigo
+    - cyclop
+    - paralleltest
+    - ifshort # so annoying
+    - golint
+    - tagliatelle
+    - forcetypeassert
+    - wrapcheck
+    - revive
+    - structcheck
+    - stylecheck
+    - exhaustive
+    - varnamelen
+
+linters-settings:
+  govet:
+    check-shadowing: true
+    use-installed-packages: true
+  dupl:
+    threshold: 100
+  goconst:
+    min-len: 8
+    min-occurrences: 3
+  gocyclo:
+    min-complexity: 20
+  gocritic:
+    disabled-checks:
+      - ifElseChain
+
+
+issues:
+  max-per-linter: 0
+  max-same: 0
+  exclude-use-default: false
+  exclude:
+    # Captured by errcheck.
+    - '^(G104|G204):'
+    # Very commonly not checked.
+    - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*Print(f|ln|)|os\.(Un)?Setenv). is not checked'
+    # Weird error only seen on Kochiku...
+ - 'internal error: no range for' + - 'exported method `.*\.(MarshalJSON|UnmarshalJSON|URN|Payload|GoString|Close|Provides|Requires|ExcludeFromHash|MarshalText|UnmarshalText|Description|Check|Poll|Severity)` should have comment or be unexported' + - 'composite literal uses unkeyed fields' + - 'declaration of "err" shadows declaration' + - 'by other packages, and that stutters' + - 'Potential file inclusion via variable' + - 'at least one file in a package should have a package comment' + - 'bad syntax for struct tag pair' diff --git a/vendor/github.com/invopop/jsonschema/COPYING b/vendor/github.com/invopop/jsonschema/COPYING new file mode 100644 index 0000000000..2993ec085d --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/COPYING @@ -0,0 +1,19 @@ +Copyright (C) 2014 Alec Thomas + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/invopop/jsonschema/README.md b/vendor/github.com/invopop/jsonschema/README.md new file mode 100644 index 0000000000..100ad0c7ed --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/README.md @@ -0,0 +1,363 @@ +# Go JSON Schema Reflection + +[![Lint](https://github.com/invopop/jsonschema/actions/workflows/lint.yaml/badge.svg)](https://github.com/invopop/jsonschema/actions/workflows/lint.yaml) +[![Test Go](https://github.com/invopop/jsonschema/actions/workflows/test.yaml/badge.svg)](https://github.com/invopop/jsonschema/actions/workflows/test.yaml) +[![Go Report Card](https://goreportcard.com/badge/github.com/invopop/jsonschema)](https://goreportcard.com/report/github.com/invopop/jsonschema) +[![GoDoc](https://godoc.org/github.com/invopop/jsonschema?status.svg)](https://godoc.org/github.com/invopop/jsonschema) +![Latest Tag](https://img.shields.io/github/v/tag/invopop/jsonschema) + +This package can be used to generate [JSON Schemas](http://json-schema.org/latest/json-schema-validation.html) from Go types through reflection. + +- Supports arbitrarily complex types, including `interface{}`, maps, slices, etc. +- Supports json-schema features such as minLength, maxLength, pattern, format, etc. +- Supports simple string and numeric enums. +- Supports custom property fields via the `jsonschema_extras` struct tag. + +This repository is a fork of the original [jsonschema](https://github.com/alecthomas/jsonschema) by [@alecthomas](https://github.com/alecthomas). 
At [Invopop](https://invopop.com) we use jsonschema as a cornerstone in our [GOBL library](https://github.com/invopop/gobl), and wanted to be able to continue building and adding features without taking up Alec's time. There have been a few significant changes that probably mean this version is not compatible with Alec's:
+
+- The original was stuck on the draft-04 version of JSON Schema; we've now moved to the latest JSON Schema Draft 2020-12.
+- Schema IDs are added automatically from the current Go package's URL in order to be unique, and can be disabled with the `Anonymous` option.
+- Support for the `FullyQualifyTypeName` option has been removed. If you have conflicts, you should use multiple schema files with different IDs, set the `DoNotReference` option to true to hide definitions completely, or add your own naming strategy using the `Namer` property.
+- Support for `yaml` tags and related options has been dropped for the sake of simplification. There were a [few inconsistencies](https://github.com/invopop/jsonschema/pull/21) around this that have now been fixed.
+
+## Versions
+
+This project is still under the v0 scheme; as per Go convention, breaking changes are likely. Please pin go modules to branches, and reach out if you think something can be improved.
+
+## Example
+
+The following Go type:
+
+```go
+type TestUser struct {
+	ID          int                    `json:"id"`
+	Name        string                 `json:"name" jsonschema:"title=the name,description=The name of a friend,example=joe,example=lucy,default=alex"`
+	Friends     []int                  `json:"friends,omitempty" jsonschema_description:"The list of IDs, omitted when empty"`
+	Tags        map[string]interface{} `json:"tags,omitempty" jsonschema_extras:"a=b,foo=bar,foo=bar1"`
+	BirthDate   time.Time              `json:"birth_date,omitempty" jsonschema:"oneof_required=date"`
+	YearOfBirth string                 `json:"year_of_birth,omitempty" jsonschema:"oneof_required=year"`
+	Metadata    interface{}            `json:"metadata,omitempty" jsonschema:"oneof_type=string;array"`
+	FavColor    string                 `json:"fav_color,omitempty" jsonschema:"enum=red,enum=green,enum=blue"`
+}
+```
+
+Results in the following JSON Schema:
+
+```go
+jsonschema.Reflect(&TestUser{})
+```
+
+```json
+{
+  "$schema": "http://json-schema.org/draft/2020-12/schema",
+  "$ref": "#/$defs/TestUser",
+  "$defs": {
+    "TestUser": {
+      "oneOf": [
+        {
+          "required": ["birth_date"],
+          "title": "date"
+        },
+        {
+          "required": ["year_of_birth"],
+          "title": "year"
+        }
+      ],
+      "properties": {
+        "id": {
+          "type": "integer"
+        },
+        "name": {
+          "type": "string",
+          "title": "the name",
+          "description": "The name of a friend",
+          "default": "alex",
+          "examples": ["joe", "lucy"]
+        },
+        "friends": {
+          "items": {
+            "type": "integer"
+          },
+          "type": "array",
+          "description": "The list of IDs, omitted when empty"
+        },
+        "tags": {
+          "type": "object",
+          "a": "b",
+          "foo": ["bar", "bar1"]
+        },
+        "birth_date": {
+          "type": "string",
+          "format": "date-time"
+        },
+        "year_of_birth": {
+          "type": "string"
+        },
+        "metadata": {
+          "oneOf": [
+            {
+              "type": "string"
+            },
+            {
+              "type": "array"
+            }
+          ]
+        },
+        "fav_color": {
+          "type": "string",
+          "enum": ["red", "green", "blue"]
+        }
+      },
+      "additionalProperties": false,
+      "type": "object",
+      "required": ["id", "name"]
+    }
+  }
+}
+```
+
+## YAML
+
+Support for `yaml` tags has now been removed. If you feel very strongly about this, we've opened a discussion to hear your comments: https://github.com/invopop/jsonschema/discussions/28
+
+The recommended approach if you need to deal with YAML data is to first convert to JSON. The [invopop/yaml](https://github.com/invopop/yaml) library will make this trivial.
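+
+A minimal sketch of that conversion (assuming the `yaml.YAMLToJSON` helper exposed by that library; the function name here is illustrative):
+
+```go
+import (
+	"encoding/json"
+
+	"github.com/invopop/yaml"
+)
+
+// yamlToMap converts a YAML document to JSON first, then unmarshals it with
+// the regular encoding/json machinery, which is what the generated schemas
+// describe.
+func yamlToMap(src []byte) (map[string]interface{}, error) {
+	jsonBytes, err := yaml.YAMLToJSON(src)
+	if err != nil {
+		return nil, err
+	}
+	var out map[string]interface{}
+	if err := json.Unmarshal(jsonBytes, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+```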
+
+## Configurable behaviour
+
+The behaviour of the schema generator can be altered with parameters when a `jsonschema.Reflector`
+instance is created.
+
+### ExpandedStruct
+
+If set to `true`, the top-level struct's definition is included directly in the schema root instead of being referenced from the definitions. The type passed must be a struct type.
+
+e.g.
+
+```go
+type GrandfatherType struct {
+	FamilyName string `json:"family_name" jsonschema:"required"`
+}
+
+type SomeBaseType struct {
+	SomeBaseProperty int `json:"some_base_property"`
+	// The jsonschema required tag is nonsensical for private and ignored properties.
+	// Their presence here tests that the fields *will not* be required in the output
+	// schema, even if they are tagged required.
+	somePrivateBaseProperty            string `json:"i_am_private" jsonschema:"required"`
+	SomeIgnoredBaseProperty            string `json:"-" jsonschema:"required"`
+	SomeSchemaIgnoredProperty          string `jsonschema:"-,required"`
+	SomeUntaggedBaseProperty           bool   `jsonschema:"required"`
+	someUnexportedUntaggedBaseProperty bool
+	Grandfather                        GrandfatherType `json:"grand"`
+}
+```
+
+will output:
+
+```json
+{
+  "$schema": "http://json-schema.org/draft/2020-12/schema",
+  "required": ["some_base_property", "grand", "SomeUntaggedBaseProperty"],
+  "properties": {
+    "SomeUntaggedBaseProperty": {
+      "type": "boolean"
+    },
+    "grand": {
+      "$schema": "http://json-schema.org/draft/2020-12/schema",
+      "$ref": "#/$defs/GrandfatherType"
+    },
+    "some_base_property": {
+      "type": "integer"
+    }
+  },
+  "type": "object",
+  "$defs": {
+    "GrandfatherType": {
+      "required": ["family_name"],
+      "properties": {
+        "family_name": {
+          "type": "string"
+        }
+      },
+      "additionalProperties": false,
+      "type": "object"
+    }
+  }
+}
+```
+
+### Using Go Comments
+
+Writing a good schema with descriptions inside tags can become cumbersome and tedious, especially if you already have some Go comments around your types and field definitions. If you'd like to take advantage of these existing comments, you can use the `AddGoComments(base, path string)` method that forms part of the reflector to parse your go files and automatically generate a dictionary of Go import paths, types, and fields, to individual comments. These will then be used automatically as description fields, and can be overridden with a manual definition if needed.
+
+Take a simplified example of a User struct which, for the sake of simplicity, we assume is defined inside this package:
+
+```go
+package main
+
+// User is used as a base to provide tests for comments.
+type User struct {
+	// Unique sequential identifier.
+	ID int `json:"id" jsonschema:"required"`
+	// Name of the user
+	Name string `json:"name"`
+}
+```
+
+To get the comments provided into your JSON schema, use a regular `Reflector` and add the go code using an import module URL and path. Fully qualified go module paths cannot be determined reliably by the `go/parser` library, so we need to introduce this manually:
+
+```go
+r := new(Reflector)
+if err := r.AddGoComments("github.com/invopop/jsonschema", "./"); err != nil {
+	// deal with error
+}
+s := r.Reflect(&User{})
+// output
+```
+
+Expect the results to be similar to:
+
+```json
+{
+  "$schema": "http://json-schema.org/draft/2020-12/schema",
+  "$ref": "#/$defs/User",
+  "$defs": {
+    "User": {
+      "required": ["id"],
+      "properties": {
+        "id": {
+          "type": "integer",
+          "description": "Unique sequential identifier."
+ }, + "name": { + "type": "string", + "description": "Name of the user" + } + }, + "additionalProperties": false, + "type": "object", + "description": "User is used as a base to provide tests for comments." + } + } +} +``` + +### Custom Key Naming + +In some situations, the keys actually used to write files are different from Go structs'. + +This is often the case when writing a configuration file to YAML or JSON from a Go struct, or when returning a JSON response for a Web API: APIs typically use snake_case, while Go uses PascalCase. + +You can pass a `func(string) string` function to `Reflector`'s `KeyNamer` option to map Go field names to JSON key names and reflect the aforementioned transformations, without having to specify `json:"..."` on every struct field. + +For example, consider the following struct + +```go +type User struct { + GivenName string + PasswordSalted []byte `json:"salted_password"` +} +``` + +We can transform field names to snake_case in the generated JSON schema: + +```go +r := new(jsonschema.Reflector) +r.KeyNamer = strcase.SnakeCase // from package github.com/stoewer/go-strcase + +r.Reflect(&User{}) +``` + +Will yield + +```diff + { + "$schema": "http://json-schema.org/draft/2020-12/schema", + "$ref": "#/$defs/User", + "$defs": { + "User": { + "properties": { +- "GivenName": { ++ "given_name": { + "type": "string" + }, + "salted_password": { + "type": "string", + "contentEncoding": "base64" + } + }, + "additionalProperties": false, + "type": "object", +- "required": ["GivenName", "salted_password"] ++ "required": ["given_name", "salted_password"] + } + } + } +``` + +As you can see, if a field name has a `json:""` tag set, the `key` argument to `KeyNamer` will have the value of that tag. + +### Custom Type Definitions + +Sometimes it can be useful to have custom JSON Marshal and Unmarshal methods in your structs that automatically convert for example a string into an object. + +To override auto-generating an object type for your type, implement the `JSONSchema() *Schema` method and whatever is defined will be provided in the schema definitions. 
+
+Take the following simplified example of a `CompactDate` that only includes the Year and Month:
+
+```go
+type CompactDate struct {
+	Year  int
+	Month int
+}
+
+func (d *CompactDate) UnmarshalJSON(data []byte) error {
+	if len(data) != 9 {
+		return errors.New("invalid compact date length")
+	}
+	var err error
+	d.Year, err = strconv.Atoi(string(data[1:5]))
+	if err != nil {
+		return err
+	}
+	d.Month, err = strconv.Atoi(string(data[6:8]))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (d *CompactDate) MarshalJSON() ([]byte, error) {
+	buf := new(bytes.Buffer)
+	buf.WriteByte('"')
+	buf.WriteString(fmt.Sprintf("%d-%02d", d.Year, d.Month))
+	buf.WriteByte('"')
+	return buf.Bytes(), nil
+}
+
+func (CompactDate) JSONSchema() *Schema {
+	return &Schema{
+		Type:        "string",
+		Title:       "Compact Date",
+		Description: "Short date that only includes year and month",
+		Pattern:     "^[0-9]{4}-[0-1][0-9]$",
+	}
+}
+```
+
+The resulting schema generated for this struct would look like:
+
+```json
+{
+  "$schema": "http://json-schema.org/draft/2020-12/schema",
+  "$ref": "#/$defs/CompactDate",
+  "$defs": {
+    "CompactDate": {
+      "pattern": "^[0-9]{4}-[0-1][0-9]$",
+      "type": "string",
+      "title": "Compact Date",
+      "description": "Short date that only includes year and month"
+    }
+  }
+}
+```
diff --git a/vendor/github.com/invopop/jsonschema/comment_extractor.go b/vendor/github.com/invopop/jsonschema/comment_extractor.go
new file mode 100644
index 0000000000..0088b412a2
--- /dev/null
+++ b/vendor/github.com/invopop/jsonschema/comment_extractor.go
@@ -0,0 +1,90 @@
+package jsonschema
+
+import (
+	"fmt"
+	"io/fs"
+	gopath "path"
+	"path/filepath"
+	"strings"
+
+	"go/ast"
+	"go/doc"
+	"go/parser"
+	"go/token"
+)
+
+// ExtractGoComments will read all the go files contained in the provided path,
+// including sub-directories, in order to generate a dictionary of comments
+// associated with Types and Fields. The results will be added to the `commentsMap`
+// provided in the parameters and expected to be used for Schema "description" fields.
+//
+// The `go/parser` library is used to extract all the comments and unfortunately doesn't
+// have a built-in way to determine the fully qualified name of a package. The `base` parameter,
+// the URL used to import that package, is thus required to be able to match reflected types.
+//
+// When parsing type comments, we use the `go/doc`'s Synopsis method to extract the first phrase
+// only. Field comments, which tend to be much shorter, will include everything.
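+//
+// A rough usage sketch (the import path and directory here are illustrative):
+//
+//	comments := map[string]string{}
+//	if err := ExtractGoComments("github.com/invopop/jsonschema", "./", comments); err != nil {
+//		// handle parse errors
+//	}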
+func ExtractGoComments(base, path string, commentMap map[string]string) error { + fset := token.NewFileSet() + dict := make(map[string][]*ast.Package) + err := filepath.Walk(path, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + d, err := parser.ParseDir(fset, path, nil, parser.ParseComments) + if err != nil { + return err + } + for _, v := range d { + // paths may have multiple packages, like for tests + k := gopath.Join(base, path) + dict[k] = append(dict[k], v) + } + } + return nil + }) + if err != nil { + return err + } + + for pkg, p := range dict { + for _, f := range p { + gtxt := "" + typ := "" + ast.Inspect(f, func(n ast.Node) bool { + switch x := n.(type) { + case *ast.TypeSpec: + typ = x.Name.String() + if !ast.IsExported(typ) { + typ = "" + } else { + txt := x.Doc.Text() + if txt == "" && gtxt != "" { + txt = gtxt + gtxt = "" + } + txt = doc.Synopsis(txt) + commentMap[fmt.Sprintf("%s.%s", pkg, typ)] = strings.TrimSpace(txt) + } + case *ast.Field: + txt := x.Doc.Text() + if typ != "" && txt != "" { + for _, n := range x.Names { + if ast.IsExported(n.String()) { + k := fmt.Sprintf("%s.%s.%s", pkg, typ, n) + commentMap[k] = strings.TrimSpace(txt) + } + } + } + case *ast.GenDecl: + // remember for the next type + gtxt = x.Doc.Text() + } + return true + }) + } + } + + return nil +} diff --git a/vendor/github.com/invopop/jsonschema/id.go b/vendor/github.com/invopop/jsonschema/id.go new file mode 100644 index 0000000000..73fafb38d0 --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/id.go @@ -0,0 +1,76 @@ +package jsonschema + +import ( + "errors" + "fmt" + "net/url" + "strings" +) + +// ID represents a Schema ID type which should always be a URI. +// See draft-bhutton-json-schema-00 section 8.2.1 +type ID string + +// EmptyID is used to explicitly define an ID with no value. +const EmptyID ID = "" + +// Validate is used to check if the ID looks like a proper schema. +// This is done by parsing the ID as a URL and checking it has all the +// relevant parts. +func (id ID) Validate() error { + u, err := url.Parse(id.String()) + if err != nil { + return fmt.Errorf("invalid URL: %w", err) + } + if u.Hostname() == "" { + return errors.New("missing hostname") + } + if !strings.Contains(u.Hostname(), ".") { + return errors.New("hostname does not look valid") + } + if u.Path == "" { + return errors.New("path is expected") + } + if u.Scheme != "https" && u.Scheme != "http" { + return errors.New("unexpected schema") + } + return nil +} + +// Anchor sets the anchor part of the schema URI. +func (id ID) Anchor(name string) ID { + b := id.Base() + return ID(b.String() + "#" + name) +} + +// Def adds or replaces a definition identifier. +func (id ID) Def(name string) ID { + b := id.Base() + return ID(b.String() + "#/$defs/" + name) +} + +// Add appends the provided path to the id, and removes any +// anchor data that might be there. 
+func (id ID) Add(path string) ID { + b := id.Base() + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return ID(b.String() + path) +} + +// Base removes any anchor information from the schema +func (id ID) Base() ID { + s := id.String() + i := strings.LastIndex(s, "#") + if i != -1 { + s = s[0:i] + } + s = strings.TrimRight(s, "/") + return ID(s) +} + +// String provides string version of ID +func (id ID) String() string { + return string(id) +} diff --git a/vendor/github.com/invopop/jsonschema/reflect.go b/vendor/github.com/invopop/jsonschema/reflect.go new file mode 100644 index 0000000000..6ebc6be56e --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/reflect.go @@ -0,0 +1,1133 @@ +// Package jsonschema uses reflection to generate JSON Schemas from Go types [1]. +// +// If json tags are present on struct fields, they will be used to infer +// property names and if a property is required (omitempty is present). +// +// [1] http://json-schema.org/latest/json-schema-validation.html +package jsonschema + +import ( + "bytes" + "encoding/json" + "net" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "github.com/iancoleman/orderedmap" +) + +// Version is the JSON Schema version. +var Version = "https://json-schema.org/draft/2020-12/schema" + +// Schema represents a JSON Schema object type. +// RFC draft-bhutton-json-schema-00 section 4.3 +type Schema struct { + // RFC draft-bhutton-json-schema-00 + Version string `json:"$schema,omitempty"` // section 8.1.1 + ID ID `json:"$id,omitempty"` // section 8.2.1 + Anchor string `json:"$anchor,omitempty"` // section 8.2.2 + Ref string `json:"$ref,omitempty"` // section 8.2.3.1 + DynamicRef string `json:"$dynamicRef,omitempty"` // section 8.2.3.2 + Definitions Definitions `json:"$defs,omitempty"` // section 8.2.4 + Comments string `json:"$comment,omitempty"` // section 8.3 + // RFC draft-bhutton-json-schema-00 section 10.2.1 (Sub-schemas with logic) + AllOf []*Schema `json:"allOf,omitempty"` // section 10.2.1.1 + AnyOf []*Schema `json:"anyOf,omitempty"` // section 10.2.1.2 + OneOf []*Schema `json:"oneOf,omitempty"` // section 10.2.1.3 + Not *Schema `json:"not,omitempty"` // section 10.2.1.4 + // RFC draft-bhutton-json-schema-00 section 10.2.2 (Apply sub-schemas conditionally) + If *Schema `json:"if,omitempty"` // section 10.2.2.1 + Then *Schema `json:"then,omitempty"` // section 10.2.2.2 + Else *Schema `json:"else,omitempty"` // section 10.2.2.3 + DependentSchemas map[string]*Schema `json:"dependentSchemas,omitempty"` // section 10.2.2.4 + // RFC draft-bhutton-json-schema-00 section 10.3.1 (arrays) + PrefixItems []*Schema `json:"prefixItems,omitempty"` // section 10.3.1.1 + Items *Schema `json:"items,omitempty"` // section 10.3.1.2 (replaces additionalItems) + Contains *Schema `json:"contains,omitempty"` // section 10.3.1.3 + // RFC draft-bhutton-json-schema-00 section 10.3.2 (sub-schemas) + Properties *orderedmap.OrderedMap `json:"properties,omitempty"` // section 10.3.2.1 + PatternProperties map[string]*Schema `json:"patternProperties,omitempty"` // section 10.3.2.2 + AdditionalProperties *Schema `json:"additionalProperties,omitempty"` // section 10.3.2.3 + PropertyNames *Schema `json:"propertyNames,omitempty"` // section 10.3.2.4 + // RFC draft-bhutton-json-schema-validation-00, section 6 + Type string `json:"type,omitempty"` // section 6.1.1 + Enum []interface{} `json:"enum,omitempty"` // section 6.1.2 + Const interface{} `json:"const,omitempty"` // section 6.1.3 + MultipleOf int `json:"multipleOf,omitempty"` // section 
6.2.1 + Maximum int `json:"maximum,omitempty"` // section 6.2.2 + ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` // section 6.2.3 + Minimum int `json:"minimum,omitempty"` // section 6.2.4 + ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` // section 6.2.5 + MaxLength int `json:"maxLength,omitempty"` // section 6.3.1 + MinLength int `json:"minLength,omitempty"` // section 6.3.2 + Pattern string `json:"pattern,omitempty"` // section 6.3.3 + MaxItems int `json:"maxItems,omitempty"` // section 6.4.1 + MinItems int `json:"minItems,omitempty"` // section 6.4.2 + UniqueItems bool `json:"uniqueItems,omitempty"` // section 6.4.3 + MaxContains uint `json:"maxContains,omitempty"` // section 6.4.4 + MinContains uint `json:"minContains,omitempty"` // section 6.4.5 + MaxProperties int `json:"maxProperties,omitempty"` // section 6.5.1 + MinProperties int `json:"minProperties,omitempty"` // section 6.5.2 + Required []string `json:"required,omitempty"` // section 6.5.3 + DependentRequired map[string][]string `json:"dependentRequired,omitempty"` // section 6.5.4 + // RFC draft-bhutton-json-schema-validation-00, section 7 + Format string `json:"format,omitempty"` + // RFC draft-bhutton-json-schema-validation-00, section 8 + ContentEncoding string `json:"contentEncoding,omitempty"` // section 8.3 + ContentMediaType string `json:"contentMediaType,omitempty"` // section 8.4 + ContentSchema *Schema `json:"contentSchema,omitempty"` // section 8.5 + // RFC draft-bhutton-json-schema-validation-00, section 9 + Title string `json:"title,omitempty"` // section 9.1 + Description string `json:"description,omitempty"` // section 9.1 + Default interface{} `json:"default,omitempty"` // section 9.2 + Deprecated bool `json:"deprecated,omitempty"` // section 9.3 + ReadOnly bool `json:"readOnly,omitempty"` // section 9.4 + WriteOnly bool `json:"writeOnly,omitempty"` // section 9.4 + Examples []interface{} `json:"examples,omitempty"` // section 9.5 + + Extras map[string]interface{} `json:"-"` + + // Special boolean representation of the Schema - section 4.3.2 + boolean *bool +} + +var ( + // TrueSchema defines a schema with a true value + TrueSchema = &Schema{boolean: &[]bool{true}[0]} + // FalseSchema defines a schema with a false value + FalseSchema = &Schema{boolean: &[]bool{false}[0]} +) + +// customSchemaImpl is used to detect if the type provides it's own +// custom Schema Type definition to use instead. Very useful for situations +// where there are custom JSON Marshal and Unmarshal methods. +type customSchemaImpl interface { + JSONSchema() *Schema +} + +// Function to be run after the schema has been generated. 
+// this will let you modify a schema afterwards
+type extendSchemaImpl interface {
+	JSONSchemaExtend(*Schema)
+}
+
+var customType = reflect.TypeOf((*customSchemaImpl)(nil)).Elem()
+var extendType = reflect.TypeOf((*extendSchemaImpl)(nil)).Elem()
+
+// customSchemaGetFieldDocString
+type customSchemaGetFieldDocString interface {
+	GetFieldDocString(fieldName string) string
+}
+
+type customGetFieldDocString func(fieldName string) string
+
+var customStructGetFieldDocString = reflect.TypeOf((*customSchemaGetFieldDocString)(nil)).Elem()
+
+// Reflect reflects to Schema from a value using the default Reflector
+func Reflect(v interface{}) *Schema {
+	return ReflectFromType(reflect.TypeOf(v))
+}
+
+// ReflectFromType generates root schema using the default Reflector
+func ReflectFromType(t reflect.Type) *Schema {
+	r := &Reflector{}
+	return r.ReflectFromType(t)
+}
+
+// A Reflector reflects values into a Schema.
+type Reflector struct {
+	// BaseSchemaID defines the URI that will be used as a base to determine Schema
+	// IDs for models. For example, a base Schema ID of `https://invopop.com/schemas`
+	// when defined with a struct called `User{}`, will result in a schema with an
+	// ID set to `https://invopop.com/schemas/user`.
+	//
+	// If no `BaseSchemaID` is provided, we'll take the type's complete package path
+	// and use that as a base instead. Set `Anonymous` to true if you do not want to
+	// include a schema ID.
+	BaseSchemaID ID
+
+	// Anonymous when true will hide the auto-generated Schema ID and provide what is
+	// known as an "anonymous schema". As a rule, this is not recommended.
+	Anonymous bool
+
+	// AssignAnchor when true will use the original struct's name as an anchor inside
+	// every definition, including the root schema. These can be useful for having a
+	// reference to the original struct's name in CamelCase instead of the snake-case used
+	// by default for URI compatibility.
+	//
+	// Anchors do not appear to be widely used out in the wild, so at this time the
+	// anchors themselves will not be used inside generated schema.
+	AssignAnchor bool
+
+	// AllowAdditionalProperties will cause the Reflector to generate a schema
+	// without additionalProperties set to 'false' for all struct types. This means
+	// the presence of additional keys in JSON objects will not cause validation
+	// to fail. Note said additional keys will simply be dropped when the
+	// validated JSON is unmarshaled.
+	AllowAdditionalProperties bool
+
+	// RequiredFromJSONSchemaTags will cause the Reflector to generate a schema
+	// that requires any key tagged with `jsonschema:required`, overriding the
+	// default of requiring any key *not* tagged with `json:,omitempty`.
+	RequiredFromJSONSchemaTags bool
+
+	// Do not reference definitions. This will remove the top-level $defs map and
+	// instead cause the entire structure of types to be output in one tree. The
+	// list of type definitions (`$defs`) will not be included.
+	DoNotReference bool
+
+	// ExpandedStruct when true will include the reflected type's definition in the
+	// root as opposed to a definition with a reference.
+	ExpandedStruct bool
+
+	// IgnoredTypes defines a slice of types that should be ignored in the schema,
+	// switching to just allowing additional properties instead.
+	IgnoredTypes []interface{}
+
+	// Lookup allows a function to be defined that will provide a custom mapping of
+	// types to Schema IDs.
This allows existing schema documents to be referenced + // by their ID instead of being embedded into the current schema definitions. + // Reflected types will never be pointers, only underlying elements. + Lookup func(reflect.Type) ID + + // Mapper is a function that can be used to map custom Go types to jsonschema schemas. + Mapper func(reflect.Type) *Schema + + // Namer allows customizing of type names. The default is to use the type's name + // provided by the reflect package. + Namer func(reflect.Type) string + + // KeyNamer allows customizing of key names. + // The default is to use the key's name as is, or the json tag if present. + // If a json tag is present, KeyNamer will receive the tag's name as an argument, not the original key name. + KeyNamer func(string) string + + // AdditionalFields allows adding structfields for a given type + AdditionalFields func(reflect.Type) []reflect.StructField + + // CommentMap is a dictionary of fully qualified go types and fields to comment + // strings that will be used if a description has not already been provided in + // the tags. Types and fields are added to the package path using "." as a + // separator. + // + // Type descriptions should be defined like: + // + // map[string]string{"github.com/invopop/jsonschema.Reflector": "A Reflector reflects values into a Schema."} + // + // And Fields defined as: + // + // map[string]string{"github.com/invopop/jsonschema.Reflector.DoNotReference": "Do not reference definitions."} + // + // See also: AddGoComments + CommentMap map[string]string +} + +// Reflect reflects to Schema from a value. +func (r *Reflector) Reflect(v interface{}) *Schema { + return r.ReflectFromType(reflect.TypeOf(v)) +} + +// ReflectFromType generates root schema +func (r *Reflector) ReflectFromType(t reflect.Type) *Schema { + if t.Kind() == reflect.Ptr { + t = t.Elem() // re-assign from pointer + } + + name := r.typeName(t) + + s := new(Schema) + definitions := Definitions{} + s.Definitions = definitions + bs := r.reflectTypeToSchemaWithID(definitions, t) + if r.ExpandedStruct { + *s = *definitions[name] + delete(definitions, name) + } else { + *s = *bs + } + + // Attempt to set the schema ID + if !r.Anonymous && s.ID == EmptyID { + baseSchemaID := r.BaseSchemaID + if baseSchemaID == EmptyID { + id := ID("https://" + t.PkgPath()) + if err := id.Validate(); err == nil { + // it's okay to silently ignore URL errors + baseSchemaID = id + } + } + if baseSchemaID != EmptyID { + s.ID = baseSchemaID.Add(ToSnakeCase(name)) + } + } + + s.Version = Version + if !r.DoNotReference { + s.Definitions = definitions + } + + return s +} + +// Definitions hold schema definitions. +// http://json-schema.org/latest/json-schema-validation.html#rfc.section.5.26 +// RFC draft-wright-json-schema-validation-00, section 5.26 +type Definitions map[string]*Schema + +// Available Go defined types for JSON Schema Validation. +// RFC draft-wright-json-schema-validation-00, section 7.3 +var ( + timeType = reflect.TypeOf(time.Time{}) // date-time RFC section 7.3.1 + ipType = reflect.TypeOf(net.IP{}) // ipv4 and ipv6 RFC section 7.3.4, 7.3.5 + uriType = reflect.TypeOf(url.URL{}) // uri RFC section 7.3.6 +) + +// Byte slices will be encoded as base64 +var byteSliceType = reflect.TypeOf([]byte(nil)) + +// Except for json.RawMessage +var rawMessageType = reflect.TypeOf(json.RawMessage{}) + +// Go code generated from protobuf enum types should fulfil this interface. 
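+//
+// For such a type the reflector emits a permissive schema along these lines
+// (illustrative output sketch):
+//
+//	{"oneOf": [{"type": "string"}, {"type": "integer"}]}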
+type protoEnum interface { + EnumDescriptor() ([]byte, []int) +} + +var protoEnumType = reflect.TypeOf((*protoEnum)(nil)).Elem() + +// SetBaseSchemaID is a helper use to be able to set the reflectors base +// schema ID from a string as opposed to then ID instance. +func (r *Reflector) SetBaseSchemaID(id string) { + r.BaseSchemaID = ID(id) +} + +func (r *Reflector) refOrReflectTypeToSchema(definitions Definitions, t reflect.Type) *Schema { + id := r.lookupID(t) + if id != EmptyID { + return &Schema{ + Ref: id.String(), + } + } + + // Already added to definitions? + if def := r.refDefinition(definitions, t); def != nil { + return def + } + + return r.reflectTypeToSchemaWithID(definitions, t) +} + +func (r *Reflector) reflectTypeToSchemaWithID(defs Definitions, t reflect.Type) *Schema { + s := r.reflectTypeToSchema(defs, t) + if s != nil { + if r.Lookup != nil { + id := r.Lookup(t) + if id != EmptyID { + s.ID = id + } + } + } + return s +} + +func (r *Reflector) reflectTypeToSchema(definitions Definitions, t reflect.Type) *Schema { + // only try to reflect non-pointers + if t.Kind() == reflect.Ptr { + return r.refOrReflectTypeToSchema(definitions, t.Elem()) + } + + // Do any pre-definitions exist? + if r.Mapper != nil { + if t := r.Mapper(t); t != nil { + return t + } + } + if rt := r.reflectCustomSchema(definitions, t); rt != nil { + return rt + } + + // Prepare a base to which details can be added + st := new(Schema) + + // jsonpb will marshal protobuf enum options as either strings or integers. + // It will unmarshal either. + if t.Implements(protoEnumType) { + st.OneOf = []*Schema{ + {Type: "string"}, + {Type: "integer"}, + } + return st + } + + // Defined format types for JSON Schema Validation + // RFC draft-wright-json-schema-validation-00, section 7.3 + // TODO email RFC section 7.3.2, hostname RFC section 7.3.3, uriref RFC section 7.3.7 + if t == ipType { + // TODO differentiate ipv4 and ipv6 RFC section 7.3.4, 7.3.5 + st.Type = "string" + st.Format = "ipv4" + return st + } + + switch t.Kind() { + case reflect.Struct: + r.reflectStruct(definitions, t, st) + + case reflect.Slice, reflect.Array: + r.reflectSliceOrArray(definitions, t, st) + + case reflect.Map: + r.reflectMap(definitions, t, st) + + case reflect.Interface: + // empty + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + st.Type = "integer" + + case reflect.Float32, reflect.Float64: + st.Type = "number" + + case reflect.Bool: + st.Type = "boolean" + + case reflect.String: + st.Type = "string" + + default: + panic("unsupported type " + t.String()) + } + + r.reflectSchemaExtend(definitions, t, st) + + // Always try to reference the definition which may have just been created + if def := r.refDefinition(definitions, t); def != nil { + return def + } + + return st +} + +func (r *Reflector) reflectCustomSchema(definitions Definitions, t reflect.Type) *Schema { + if t.Kind() == reflect.Ptr { + return r.reflectCustomSchema(definitions, t.Elem()) + } + + if t.Implements(customType) { + v := reflect.New(t) + o := v.Interface().(customSchemaImpl) + st := o.JSONSchema() + r.addDefinition(definitions, t, st) + if ref := r.refDefinition(definitions, t); ref != nil { + return ref + } + return st + } + + return nil +} + +func (r *Reflector) reflectSchemaExtend(definitions Definitions, t reflect.Type, s *Schema) *Schema { + if t.Implements(extendType) { + v := reflect.New(t) + o := v.Interface().(extendSchemaImpl) + 
o.JSONSchemaExtend(s) + if ref := r.refDefinition(definitions, t); ref != nil { + return ref + } + } + + return s +} + +func (r *Reflector) reflectSliceOrArray(definitions Definitions, t reflect.Type, st *Schema) { + if t == rawMessageType { + return + } + + r.addDefinition(definitions, t, st) + + if st.Description == "" { + st.Description = r.lookupComment(t, "") + } + + if t.Kind() == reflect.Array { + st.MinItems = t.Len() + st.MaxItems = st.MinItems + } + if t.Kind() == reflect.Slice && t.Elem() == byteSliceType.Elem() { + st.Type = "string" + // NOTE: ContentMediaType is not set here + st.ContentEncoding = "base64" + } else { + st.Type = "array" + st.Items = r.refOrReflectTypeToSchema(definitions, t.Elem()) + } +} + +func (r *Reflector) reflectMap(definitions Definitions, t reflect.Type, st *Schema) { + r.addDefinition(definitions, t, st) + + st.Type = "object" + if st.Description == "" { + st.Description = r.lookupComment(t, "") + } + + switch t.Key().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + st.PatternProperties = map[string]*Schema{ + "^[0-9]+$": r.refOrReflectTypeToSchema(definitions, t.Elem()), + } + st.AdditionalProperties = FalseSchema + return + } + if t.Elem().Kind() != reflect.Interface { + st.PatternProperties = map[string]*Schema{ + ".*": r.refOrReflectTypeToSchema(definitions, t.Elem()), + } + } +} + +// Reflects a struct to a JSON Schema type. +func (r *Reflector) reflectStruct(definitions Definitions, t reflect.Type, s *Schema) { + // Handle special types + switch t { + case timeType: // date-time RFC section 7.3.1 + s.Type = "string" + s.Format = "date-time" + return + case uriType: // uri RFC section 7.3.6 + s.Type = "string" + s.Format = "uri" + return + } + + r.addDefinition(definitions, t, s) + s.Type = "object" + s.Properties = orderedmap.New() + s.Description = r.lookupComment(t, "") + if r.AssignAnchor { + s.Anchor = t.Name() + } + if !r.AllowAdditionalProperties { + s.AdditionalProperties = FalseSchema + } + + ignored := false + for _, it := range r.IgnoredTypes { + if reflect.TypeOf(it) == t { + ignored = true + break + } + } + if !ignored { + r.reflectStructFields(s, definitions, t) + } +} + +func (r *Reflector) reflectStructFields(st *Schema, definitions Definitions, t reflect.Type) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return + } + + var getFieldDocString customGetFieldDocString + if t.Implements(customStructGetFieldDocString) { + v := reflect.New(t) + o := v.Interface().(customSchemaGetFieldDocString) + getFieldDocString = o.GetFieldDocString + } + + handleField := func(f reflect.StructField) { + name, shouldEmbed, required, nullable := r.reflectFieldName(f) + // if anonymous and exported type should be processed recursively + // current type should inherit properties of anonymous one + if name == "" { + if shouldEmbed { + r.reflectStructFields(st, definitions, f.Type) + } + return + } + + property := r.refOrReflectTypeToSchema(definitions, f.Type) + property.structKeywordsFromTags(f, st, name) + if property.Description == "" { + property.Description = r.lookupComment(t, f.Name) + } + if getFieldDocString != nil { + property.Description = getFieldDocString(f.Name) + } + + if nullable { + property = &Schema{ + OneOf: []*Schema{ + property, + { + Type: "null", + }, + }, + } + } + + st.Properties.Set(name, property) + if required { + st.Required = appendUniqueString(st.Required, name) + } + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + 
handleField(f)
+	}
+	if r.AdditionalFields != nil {
+		if af := r.AdditionalFields(t); af != nil {
+			for _, sf := range af {
+				handleField(sf)
+			}
+		}
+	}
+}
+
+func appendUniqueString(base []string, value string) []string {
+	for _, v := range base {
+		if v == value {
+			return base
+		}
+	}
+	return append(base, value)
+}
+
+func (r *Reflector) lookupComment(t reflect.Type, name string) string {
+	if r.CommentMap == nil {
+		return ""
+	}
+
+	n := fullyQualifiedTypeName(t)
+	if name != "" {
+		n = n + "." + name
+	}
+
+	return r.CommentMap[n]
+}
+
+// addDefinition will append the provided schema. If needed, an ID and anchor will also be added.
+func (r *Reflector) addDefinition(definitions Definitions, t reflect.Type, s *Schema) {
+	name := r.typeName(t)
+	if name == "" {
+		return
+	}
+	definitions[name] = s
+}
+
+// refDefinition will provide a schema with a reference to an existing definition.
+func (r *Reflector) refDefinition(definitions Definitions, t reflect.Type) *Schema {
+	if r.DoNotReference {
+		return nil
+	}
+	name := r.typeName(t)
+	if name == "" {
+		return nil
+	}
+	if _, ok := definitions[name]; !ok {
+		return nil
+	}
+	return &Schema{
+		Ref: "#/$defs/" + name,
+	}
+}
+
+func (r *Reflector) lookupID(t reflect.Type) ID {
+	if r.Lookup != nil {
+		if t.Kind() == reflect.Ptr {
+			t = t.Elem()
+		}
+		return r.Lookup(t)
+	}
+	return EmptyID
+}
+
+func (t *Schema) structKeywordsFromTags(f reflect.StructField, parent *Schema, propertyName string) {
+	t.Description = f.Tag.Get("jsonschema_description")
+
+	tags := splitOnUnescapedCommas(f.Tag.Get("jsonschema"))
+	t.genericKeywords(tags, parent, propertyName)
+
+	switch t.Type {
+	case "string":
+		t.stringKeywords(tags)
+	case "number", "integer":
+		t.numericKeywords(tags)
+	case "array":
+		t.arrayKeywords(tags)
+	case "boolean":
+		t.booleanKeywords(tags)
+	}
+	extras := strings.Split(f.Tag.Get("jsonschema_extras"), ",")
+	t.extraKeywords(extras)
+}
+
+// read struct tags for generic keywords
+func (t *Schema) genericKeywords(tags []string, parent *Schema, propertyName string) {
+	for _, tag := range tags {
+		nameValue := strings.Split(tag, "=")
+		if len(nameValue) == 2 {
+			name, val := nameValue[0], nameValue[1]
+			switch name {
+			case "title":
+				t.Title = val
+			case "description":
+				t.Description = val
+			case "type":
+				t.Type = val
+			case "anchor":
+				t.Anchor = val
+			case "oneof_required":
+				var typeFound *Schema
+				for i := range parent.OneOf {
+					if parent.OneOf[i].Title == nameValue[1] {
+						typeFound = parent.OneOf[i]
+					}
+				}
+				if typeFound == nil {
+					typeFound = &Schema{
+						Title:    nameValue[1],
+						Required: []string{},
+					}
+					parent.OneOf = append(parent.OneOf, typeFound)
+				}
+				typeFound.Required = append(typeFound.Required, propertyName)
+			case "anyof_required":
+				var typeFound *Schema
+				for i := range parent.AnyOf {
+					if parent.AnyOf[i].Title == nameValue[1] {
+						typeFound = parent.AnyOf[i]
+					}
+				}
+				if typeFound == nil {
+					typeFound = &Schema{
+						Title:    nameValue[1],
+						Required: []string{},
+					}
+					parent.AnyOf = append(parent.AnyOf, typeFound)
+				}
+				typeFound.Required = append(typeFound.Required, propertyName)
+			case "oneof_type":
+				if t.OneOf == nil {
+					t.OneOf = make([]*Schema, 0, 1)
+				}
+				t.Type = ""
+				types := strings.Split(nameValue[1], ";")
+				for _, ty := range types {
+					t.OneOf = append(t.OneOf, &Schema{
+						Type: ty,
+					})
+				}
+			case "anyof_type":
+				if t.AnyOf == nil {
+					t.AnyOf = make([]*Schema, 0, 1)
+				}
+				t.Type = ""
+				types := strings.Split(nameValue[1], ";")
+				for _, ty := range types {
+					t.AnyOf = append(t.AnyOf, &Schema{
+						Type: ty,
+					})
+				}
+			case "enum":
+				switch t.Type {
+				case "string":
+					t.Enum = append(t.Enum, val)
+				case "integer":
+					i, _ := strconv.Atoi(val)
+					t.Enum = append(t.Enum, i)
+				case "number":
+					f, _ := strconv.ParseFloat(val, 64)
+					t.Enum = append(t.Enum, f)
+				}
+			}
+		}
+	}
+}
+
+// read struct tags for boolean type keywords
+func (t *Schema) booleanKeywords(tags []string) {
+	for _, tag := range tags {
+		nameValue := strings.Split(tag, "=")
+		if len(nameValue) != 2 {
+			continue
+		}
+		name, val := nameValue[0], nameValue[1]
+		if name == "default" {
+			if val == "true" {
+				t.Default = true
+			} else if val == "false" {
+				t.Default = false
+			}
+		}
+	}
+}
+
+// read struct tags for string type keywords
+func (t *Schema) stringKeywords(tags []string) {
+	for _, tag := range tags {
+		nameValue := strings.Split(tag, "=")
+		if len(nameValue) == 2 {
+			name, val := nameValue[0], nameValue[1]
+			switch name {
+			case "minLength":
+				i, _ := strconv.Atoi(val)
+				t.MinLength = i
+			case "maxLength":
+				i, _ := strconv.Atoi(val)
+				t.MaxLength = i
+			case "pattern":
+				t.Pattern = val
+			case "format":
+				switch val {
+				case "date-time", "email", "hostname", "ipv4", "ipv6", "uri", "uuid":
+					t.Format = val
+				}
+			case "readOnly":
+				i, _ := strconv.ParseBool(val)
+				t.ReadOnly = i
+			case "writeOnly":
+				i, _ := strconv.ParseBool(val)
+				t.WriteOnly = i
+			case "default":
+				t.Default = val
+			case "example":
+				t.Examples = append(t.Examples, val)
+			}
+		}
+	}
+}
+
+// read struct tags for numeric type keywords
+func (t *Schema) numericKeywords(tags []string) {
+	for _, tag := range tags {
+		nameValue := strings.Split(tag, "=")
+		if len(nameValue) == 2 {
+			name, val := nameValue[0], nameValue[1]
+			switch name {
+			case "multipleOf":
+				i, _ := strconv.Atoi(val)
+				t.MultipleOf = i
+			case "minimum":
+				i, _ := strconv.Atoi(val)
+				t.Minimum = i
+			case "maximum":
+				i, _ := strconv.Atoi(val)
+				t.Maximum = i
+			case "exclusiveMaximum":
+				b, _ := strconv.ParseBool(val)
+				t.ExclusiveMaximum = b
+			case "exclusiveMinimum":
+				b, _ := strconv.ParseBool(val)
+				t.ExclusiveMinimum = b
+			case "default":
+				i, _ := strconv.Atoi(val)
+				t.Default = i
+			case "example":
+				if i, err := strconv.Atoi(val); err == nil {
+					t.Examples = append(t.Examples, i)
+				}
+			}
+		}
+	}
+}
+
+// read struct tags for object type keywords
+// func (t *Type) objectKeywords(tags []string) {
+// 	for _, tag := range tags{
+// 		nameValue := strings.Split(tag, "=")
+// 		name, val := nameValue[0], nameValue[1]
+// 		switch name{
+// 		case "dependencies":
+// 			t.Dependencies = val
+// 			break;
+// 		case "patternProperties":
+// 			t.PatternProperties = val
+// 			break;
+// 		}
+// 	}
+// }
+
+// read struct tags for array type keywords
+func (t *Schema) arrayKeywords(tags []string) {
+	var defaultValues []interface{}
+	for _, tag := range tags {
+		nameValue := strings.Split(tag, "=")
+		if len(nameValue) == 2 {
+			name, val := nameValue[0], nameValue[1]
+			switch name {
+			case "minItems":
+				i, _ := strconv.Atoi(val)
+				t.MinItems = i
+			case "maxItems":
+				i, _ := strconv.Atoi(val)
+				t.MaxItems = i
+			case "uniqueItems":
+				t.UniqueItems = true
+			case "default":
+				defaultValues = append(defaultValues, val)
+			case "enum":
+				switch t.Items.Type {
+				case "string":
+					t.Items.Enum = append(t.Items.Enum, val)
+				case "integer":
+					i, _ := strconv.Atoi(val)
+					t.Items.Enum = append(t.Items.Enum, i)
+				case "number":
+					f, _ := strconv.ParseFloat(val, 64)
+					t.Items.Enum = append(t.Items.Enum, f)
+				}
+			case "format":
+				t.Items.Format = val
+			}
+		}
+	}
+	if
len(defaultValues) > 0 { + t.Default = defaultValues + } +} + +func (t *Schema) extraKeywords(tags []string) { + for _, tag := range tags { + nameValue := strings.SplitN(tag, "=", 2) + if len(nameValue) == 2 { + t.setExtra(nameValue[0], nameValue[1]) + } + } +} + +func (t *Schema) setExtra(key, val string) { + if t.Extras == nil { + t.Extras = map[string]interface{}{} + } + if existingVal, ok := t.Extras[key]; ok { + switch existingVal := existingVal.(type) { + case string: + t.Extras[key] = []string{existingVal, val} + case []string: + t.Extras[key] = append(existingVal, val) + case int: + t.Extras[key], _ = strconv.Atoi(val) + case bool: + t.Extras[key] = (val == "true" || val == "t") + } + } else { + switch key { + case "minimum": + t.Extras[key], _ = strconv.Atoi(val) + default: + var x interface{} + if val == "true" { + x = true + } else if val == "false" { + x = false + } else { + x = val + } + t.Extras[key] = x + } + } +} + +func requiredFromJSONTags(tags []string) bool { + if ignoredByJSONTags(tags) { + return false + } + + for _, tag := range tags[1:] { + if tag == "omitempty" { + return false + } + } + return true +} + +func requiredFromJSONSchemaTags(tags []string) bool { + if ignoredByJSONSchemaTags(tags) { + return false + } + for _, tag := range tags { + if tag == "required" { + return true + } + } + return false +} + +func nullableFromJSONSchemaTags(tags []string) bool { + if ignoredByJSONSchemaTags(tags) { + return false + } + for _, tag := range tags { + if tag == "nullable" { + return true + } + } + return false +} + +func ignoredByJSONTags(tags []string) bool { + return tags[0] == "-" +} + +func ignoredByJSONSchemaTags(tags []string) bool { + return tags[0] == "-" +} + +func (r *Reflector) reflectFieldName(f reflect.StructField) (string, bool, bool, bool) { + jsonTagString, _ := f.Tag.Lookup("json") + jsonTags := strings.Split(jsonTagString, ",") + + if ignoredByJSONTags(jsonTags) { + return "", false, false, false + } + + schemaTags := strings.Split(f.Tag.Get("jsonschema"), ",") + if ignoredByJSONSchemaTags(schemaTags) { + return "", false, false, false + } + + required := requiredFromJSONTags(jsonTags) + if r.RequiredFromJSONSchemaTags { + required = requiredFromJSONSchemaTags(schemaTags) + } + + nullable := nullableFromJSONSchemaTags(schemaTags) + + if f.Anonymous && jsonTags[0] == "" { + // As per JSON Marshal rules, anonymous structs are inherited + if f.Type.Kind() == reflect.Struct { + return "", true, false, false + } + + // As per JSON Marshal rules, anonymous pointer to structs are inherited + if f.Type.Kind() == reflect.Ptr && f.Type.Elem().Kind() == reflect.Struct { + return "", true, false, false + } + } + + // Try to determine the name from the different combos + name := f.Name + if jsonTags[0] != "" { + name = jsonTags[0] + } + if !f.Anonymous && f.PkgPath != "" { + // field not anonymous and not export has no export name + name = "" + } else if r.KeyNamer != nil { + name = r.KeyNamer(name) + } + + return name, false, required, nullable +} + +// UnmarshalJSON is used to parse a schema object or boolean. 
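Putting the tag handling above together, a hedged sketch of the struct tags this file parses; the `Event` type, field names, and values are hypothetical:

```go
// Illustrative only: each tag below exercises one of the code paths above.
type Event struct {
	// "omitempty" drops the field from Required (requiredFromJSONTags).
	Note string `json:"note,omitempty"`

	// "nullable" makes reflectStructFields wrap the property in
	// oneOf: [schema, {type: "null"}].
	Owner *string `json:"owner" jsonschema:"nullable"`

	// oneof_type clears Type and emits one oneOf entry per ";"-separated value.
	Value interface{} `json:"value" jsonschema:"oneof_type=string;integer"`

	// enum entries accumulate, converted according to the schema's Type.
	Level string `json:"level" jsonschema:"enum=debug,enum=info,enum=error"`

	// jsonschema_extras feeds setExtra for non-standard keywords.
	Legacy bool `json:"legacy" jsonschema_extras:"x-deprecated=true"`
}
```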
+func (t *Schema) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, []byte("true")) { + *t = *TrueSchema + return nil + } else if bytes.Equal(data, []byte("false")) { + *t = *FalseSchema + return nil + } + type Schema_ Schema + aux := &struct { + *Schema_ + }{ + Schema_: (*Schema_)(t), + } + return json.Unmarshal(data, aux) +} + +func (t *Schema) MarshalJSON() ([]byte, error) { + if t.boolean != nil { + if *t.boolean { + return []byte("true"), nil + } else { + return []byte("false"), nil + } + } + if reflect.DeepEqual(&Schema{}, t) { + // Don't bother returning empty schemas + return []byte("true"), nil + } + type Schema_ Schema + b, err := json.Marshal((*Schema_)(t)) + if err != nil { + return nil, err + } + if t.Extras == nil || len(t.Extras) == 0 { + return b, nil + } + m, err := json.Marshal(t.Extras) + if err != nil { + return nil, err + } + if len(b) == 2 { + return m, nil + } + b[len(b)-1] = ',' + return append(b, m[1:]...), nil +} + +func (r *Reflector) typeName(t reflect.Type) string { + if r.Namer != nil { + if name := r.Namer(t); name != "" { + return name + } + } + return t.Name() +} + +// Split on commas that are not preceded by `\`. +// This way, we prevent splitting regexes +func splitOnUnescapedCommas(tagString string) []string { + ret := make([]string, 0) + separated := strings.Split(tagString, ",") + ret = append(ret, separated[0]) + i := 0 + for _, nextTag := range separated[1:] { + if len(ret[i]) == 0 { + ret = append(ret, nextTag) + i++ + continue + } + + if ret[i][len(ret[i])-1] == '\\' { + ret[i] = ret[i][:len(ret[i])-1] + "," + nextTag + } else { + ret = append(ret, nextTag) + i++ + } + } + + return ret +} + +func fullyQualifiedTypeName(t reflect.Type) string { + return t.PkgPath() + "." + t.Name() +} + +// AddGoComments will update the reflectors comment map with all the comments +// found in the provided source directories. See the #ExtractGoComments method +// for more details. +func (r *Reflector) AddGoComments(base, path string) error { + if r.CommentMap == nil { + r.CommentMap = make(map[string]string) + } + return ExtractGoComments(base, path, r.CommentMap) +} diff --git a/vendor/github.com/invopop/jsonschema/utils.go b/vendor/github.com/invopop/jsonschema/utils.go new file mode 100644 index 0000000000..9813b11c2f --- /dev/null +++ b/vendor/github.com/invopop/jsonschema/utils.go @@ -0,0 +1,18 @@ +package jsonschema + +import ( + "regexp" + "strings" +) + +var matchFirstCap = regexp.MustCompile("(.)([A-Z][a-z]+)") +var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") + +// ToSnakeCase converts the provided string into snake case using dashes. +// This is useful for Schema IDs and definitions to be coherent with +// common JSON Schema examples. 
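Given the two regexes above, the conversion inserts dashes at lower-to-upper boundaries and lowercases the result. A minimal sketch (example inputs are illustrative):

```go
package main

import (
	"fmt"

	"github.com/invopop/jsonschema"
)

func main() {
	fmt.Println(jsonschema.ToSnakeCase("UserConfigFile")) // user-config-file
	fmt.Println(jsonschema.ToSnakeCase("BaseSchemaID"))   // base-schema-id
}
```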
+func ToSnakeCase(str string) string { + snake := matchFirstCap.ReplaceAllString(str, "${1}-${2}") + snake = matchAllCap.ReplaceAllString(snake, "${1}-${2}") + return strings.ToLower(snake) +} diff --git a/vendor/github.com/jbenet/go-context/LICENSE b/vendor/github.com/jbenet/go-context/LICENSE new file mode 100644 index 0000000000..c7386b3c94 --- /dev/null +++ b/vendor/github.com/jbenet/go-context/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Juan Batiz-Benet + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/jbenet/go-context/io/ctxio.go b/vendor/github.com/jbenet/go-context/io/ctxio.go new file mode 100644 index 0000000000..b4f2454235 --- /dev/null +++ b/vendor/github.com/jbenet/go-context/io/ctxio.go @@ -0,0 +1,120 @@ +// Package ctxio provides io.Reader and io.Writer wrappers that +// respect context.Contexts. Use these at the interface between +// your context code and your io. +// +// WARNING: read the code. see how writes and reads will continue +// until you cancel the io. Maybe this package should provide +// versions of io.ReadCloser and io.WriteCloser that automatically +// call .Close when the context expires. But for now -- since in my +// use cases I have long-lived connections with ephemeral io wrappers +// -- this has yet to be a need. +package ctxio + +import ( + "io" + + context "golang.org/x/net/context" +) + +type ioret struct { + n int + err error +} + +type Writer interface { + io.Writer +} + +type ctxWriter struct { + w io.Writer + ctx context.Context +} + +// NewWriter wraps a writer to make it respect given Context. +// If there is a blocking write, the returned Writer will return +// whenever the context is cancelled (the return values are n=0 +// and err=ctx.Err().) +// +// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying +// write-- there is no way to do that with the standard go io +// interface. So the read and write _will_ happen or hang. So, use +// this sparingly, make sure to cancel the read or write as necesary +// (e.g. closing a connection whose context is up, etc.) +// +// Furthermore, in order to protect your memory from being read +// _after_ you've cancelled the context, this io.Writer will +// first make a **copy** of the buffer. 
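A hedged usage sketch for the wrapper above, assuming a Go toolchain where golang.org/x/net/context aliases the standard context package; the `send` helper and addresses are illustrative:

```go
package connutil

import (
	"context"
	"net"
	"time"

	ctxio "github.com/jbenet/go-context/io"
)

// send writes payload with a deadline. Per the warning above, cancellation
// only returns early; it does not stop the underlying Write, so we close
// the connection to actually abort it.
func send(ctx context.Context, conn net.Conn, payload []byte) error {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	w := ctxio.NewWriter(ctx, conn)
	if _, err := w.Write(payload); err != nil {
		conn.Close()
		return err
	}
	return nil
}
```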
+func NewWriter(ctx context.Context, w io.Writer) *ctxWriter { + if ctx == nil { + ctx = context.Background() + } + return &ctxWriter{ctx: ctx, w: w} +} + +func (w *ctxWriter) Write(buf []byte) (int, error) { + buf2 := make([]byte, len(buf)) + copy(buf2, buf) + + c := make(chan ioret, 1) + + go func() { + n, err := w.w.Write(buf2) + c <- ioret{n, err} + close(c) + }() + + select { + case r := <-c: + return r.n, r.err + case <-w.ctx.Done(): + return 0, w.ctx.Err() + } +} + +type Reader interface { + io.Reader +} + +type ctxReader struct { + r io.Reader + ctx context.Context +} + +// NewReader wraps a reader to make it respect given Context. +// If there is a blocking read, the returned Reader will return +// whenever the context is cancelled (the return values are n=0 +// and err=ctx.Err().) +// +// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying +// write-- there is no way to do that with the standard go io +// interface. So the read and write _will_ happen or hang. So, use +// this sparingly, make sure to cancel the read or write as necesary +// (e.g. closing a connection whose context is up, etc.) +// +// Furthermore, in order to protect your memory from being read +// _before_ you've cancelled the context, this io.Reader will +// allocate a buffer of the same size, and **copy** into the client's +// if the read succeeds in time. +func NewReader(ctx context.Context, r io.Reader) *ctxReader { + return &ctxReader{ctx: ctx, r: r} +} + +func (r *ctxReader) Read(buf []byte) (int, error) { + buf2 := make([]byte, len(buf)) + + c := make(chan ioret, 1) + + go func() { + n, err := r.r.Read(buf2) + c <- ioret{n, err} + close(c) + }() + + select { + case ret := <-c: + copy(buf, buf2) + return ret.n, ret.err + case <-r.ctx.Done(): + return 0, r.ctx.Err() + } +} diff --git a/vendor/github.com/kevinburke/ssh_config/.gitattributes b/vendor/github.com/kevinburke/ssh_config/.gitattributes new file mode 100644 index 0000000000..44db581889 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/.gitattributes @@ -0,0 +1 @@ +testdata/dos-lines eol=crlf diff --git a/vendor/github.com/kevinburke/ssh_config/.gitignore b/vendor/github.com/kevinburke/ssh_config/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/kevinburke/ssh_config/.mailmap b/vendor/github.com/kevinburke/ssh_config/.mailmap new file mode 100644 index 0000000000..253406b1cc --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/.mailmap @@ -0,0 +1 @@ +Kevin Burke Kevin Burke diff --git a/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt new file mode 100644 index 0000000000..9dc410104b --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/AUTHORS.txt @@ -0,0 +1,6 @@ +Eugene Terentev +Kevin Burke +Mark Nevill +Sergey Lukjanov +Wayne Ashley Berry +santosh653 <70637961+santosh653@users.noreply.github.com> diff --git a/vendor/github.com/kevinburke/ssh_config/LICENSE b/vendor/github.com/kevinburke/ssh_config/LICENSE new file mode 100644 index 0000000000..b9a770ac2a --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/LICENSE @@ -0,0 +1,49 @@ +Copyright (c) 2017 Kevin Burke. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +=================== + +The lexer and parser borrow heavily from github.com/pelletier/go-toml. The +license for that project is copied below. + +The MIT License (MIT) + +Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/kevinburke/ssh_config/Makefile b/vendor/github.com/kevinburke/ssh_config/Makefile new file mode 100644 index 0000000000..a1880d18e1 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/Makefile @@ -0,0 +1,30 @@ +BUMP_VERSION := $(GOPATH)/bin/bump_version +STATICCHECK := $(GOPATH)/bin/staticcheck +WRITE_MAILMAP := $(GOPATH)/bin/write_mailmap + +$(STATICCHECK): + go get honnef.co/go/tools/cmd/staticcheck + +lint: $(STATICCHECK) + go vet ./... + $(STATICCHECK) + +test: lint + @# the timeout helps guard against infinite recursion + go test -timeout=250ms ./... + +race-test: lint + go test -timeout=500ms -race ./... + +$(BUMP_VERSION): + go get -u github.com/kevinburke/bump_version + +release: test | $(BUMP_VERSION) + $(BUMP_VERSION) minor config.go + +force: ; + +AUTHORS.txt: force | $(WRITE_MAILMAP) + $(WRITE_MAILMAP) > AUTHORS.txt + +authors: AUTHORS.txt diff --git a/vendor/github.com/kevinburke/ssh_config/README.md b/vendor/github.com/kevinburke/ssh_config/README.md new file mode 100644 index 0000000000..d1adfed7c6 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/README.md @@ -0,0 +1,89 @@ +# ssh_config + +This is a Go parser for `ssh_config` files. 
Importantly, this parser attempts +to preserve comments in a given file, so you can manipulate a `ssh_config` file +from a program, if your heart desires. + +It's designed to be used with the excellent +[x/crypto/ssh](https://golang.org/x/crypto/ssh) package, which handles SSH +negotiation but isn't very easy to configure. + +The `ssh_config` `Get()` and `GetStrict()` functions will attempt to read values +from `$HOME/.ssh/config` and fall back to `/etc/ssh/ssh_config`. The first +argument is the host name to match on, and the second argument is the key you +want to retrieve. + +```go +port := ssh_config.Get("myhost", "Port") +``` + +Certain directives can occur multiple times for a host (such as `IdentityFile`), +so you should use the `GetAll` or `GetAllStrict` directive to retrieve those +instead. + +```go +files := ssh_config.GetAll("myhost", "IdentityFile") +``` + +You can also load a config file and read values from it. + +```go +var config = ` +Host *.test + Compression yes +` + +cfg, err := ssh_config.Decode(strings.NewReader(config)) +fmt.Println(cfg.Get("example.test", "Port")) +``` + +Some SSH arguments have default values - for example, the default value for +`KeyboardAuthentication` is `"yes"`. If you call Get(), and no value for the +given Host/keyword pair exists in the config, we'll return a default for the +keyword if one exists. + +### Manipulating SSH config files + +Here's how you can manipulate an SSH config file, and then write it back to +disk. + +```go +f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config")) +cfg, _ := ssh_config.Decode(f) +for _, host := range cfg.Hosts { + fmt.Println("patterns:", host.Patterns) + for _, node := range host.Nodes { + // Manipulate the nodes as you see fit, or use a type switch to + // distinguish between Empty, KV, and Include nodes. + fmt.Println(node.String()) + } +} + +// Print the config to stdout: +fmt.Println(cfg.String()) +``` + +## Spec compliance + +Wherever possible we try to implement the specification as documented in +the `ssh_config` manpage. Unimplemented features should be present in the +[issues][issues] list. + +Notably, the `Match` directive is currently unsupported. + +[issues]: https://github.com/kevinburke/ssh_config/issues + +## Errata + +This is the second [comment-preserving configuration parser][blog] I've written, after +[an /etc/hosts parser][hostsfile]. Eventually, I will write one for every Linux +file format. + +[blog]: https://kev.inburke.com/kevin/more-comment-preserving-configuration-parsers/ +[hostsfile]: https://github.com/kevinburke/hostsfile + +## Donating + +Donations free up time to make improvements to the library, and respond to +bug reports. You can send donations via Paypal's "Send Money" feature to +kev@inburke.com. Donations are not tax deductible in the USA. diff --git a/vendor/github.com/kevinburke/ssh_config/config.go b/vendor/github.com/kevinburke/ssh_config/config.go new file mode 100644 index 0000000000..7c2c8b679c --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/config.go @@ -0,0 +1,793 @@ +// Package ssh_config provides tools for manipulating SSH config files. +// +// Importantly, this parser attempts to preserve comments in a given file, so +// you can manipulate a `ssh_config` file from a program, if your heart desires. +// +// The Get() and GetStrict() functions will attempt to read values from +// $HOME/.ssh/config, falling back to /etc/ssh/ssh_config. 
The first argument is +// the host name to match on ("example.com"), and the second argument is the key +// you want to retrieve ("Port"). The keywords are case insensitive. +// +// port := ssh_config.Get("myhost", "Port") +// +// You can also manipulate an SSH config file and then print it or write it back +// to disk. +// +// f, _ := os.Open(filepath.Join(os.Getenv("HOME"), ".ssh", "config")) +// cfg, _ := ssh_config.Decode(f) +// for _, host := range cfg.Hosts { +// fmt.Println("patterns:", host.Patterns) +// for _, node := range host.Nodes { +// fmt.Println(node.String()) +// } +// } +// +// // Write the cfg back to disk: +// fmt.Println(cfg.String()) +// +// BUG: the Match directive is currently unsupported; parsing a config with +// a Match directive will trigger an error. +package ssh_config + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + osuser "os/user" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" +) + +const version = "1.0" + +var _ = version + +type configFinder func() string + +// UserSettings checks ~/.ssh and /etc/ssh for configuration files. The config +// files are parsed and cached the first time Get() or GetStrict() is called. +type UserSettings struct { + IgnoreErrors bool + systemConfig *Config + systemConfigFinder configFinder + userConfig *Config + userConfigFinder configFinder + loadConfigs sync.Once + onceErr error +} + +func homedir() string { + user, err := osuser.Current() + if err == nil { + return user.HomeDir + } else { + return os.Getenv("HOME") + } +} + +func userConfigFinder() string { + return filepath.Join(homedir(), ".ssh", "config") +} + +// DefaultUserSettings is the default UserSettings and is used by Get and +// GetStrict. It checks both $HOME/.ssh/config and /etc/ssh/ssh_config for keys, +// and it will return parse errors (if any) instead of swallowing them. +var DefaultUserSettings = &UserSettings{ + IgnoreErrors: false, + systemConfigFinder: systemConfigFinder, + userConfigFinder: userConfigFinder, +} + +func systemConfigFinder() string { + return filepath.Join("/", "etc", "ssh", "ssh_config") +} + +func findVal(c *Config, alias, key string) (string, error) { + if c == nil { + return "", nil + } + val, err := c.Get(alias, key) + if err != nil || val == "" { + return "", err + } + if err := validate(key, val); err != nil { + return "", err + } + return val, nil +} + +func findAll(c *Config, alias, key string) ([]string, error) { + if c == nil { + return nil, nil + } + return c.GetAll(alias, key) +} + +// Get finds the first value for key within a declaration that matches the +// alias. Get returns the empty string if no value was found, or if IgnoreErrors +// is false and we could not parse the configuration file. Use GetStrict to +// disambiguate the latter cases. +// +// The match for key is case insensitive. +// +// Get is a wrapper around DefaultUserSettings.Get. +func Get(alias, key string) string { + return DefaultUserSettings.Get(alias, key) +} + +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetAllStrict to disambiguate the +// latter cases. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The match for key is case insensitive. 
+// +// GetAll is a wrapper around DefaultUserSettings.GetAll. +func GetAll(alias, key string) []string { + return DefaultUserSettings.GetAll(alias, key) +} + +// GetStrict finds the first value for key within a declaration that matches the +// alias. If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +// +// GetStrict is a wrapper around DefaultUserSettings.GetStrict. +func GetStrict(alias, key string) (string, error) { + return DefaultUserSettings.GetStrict(alias, key) +} + +// GetAllStrict retrieves zero or more directives for key for the given alias. +// +// In most cases you want to use Get or GetStrict, which returns a single value. +// However, a subset of ssh configuration values (IdentityFile, for example) +// allow you to specify multiple directives. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +// +// GetAllStrict is a wrapper around DefaultUserSettings.GetAllStrict. +func GetAllStrict(alias, key string) ([]string, error) { + return DefaultUserSettings.GetAllStrict(alias, key) +} + +// Get finds the first value for key within a declaration that matches the +// alias. Get returns the empty string if no value was found, or if IgnoreErrors +// is false and we could not parse the configuration file. Use GetStrict to +// disambiguate the latter cases. +// +// The match for key is case insensitive. +func (u *UserSettings) Get(alias, key string) string { + val, err := u.GetStrict(alias, key) + if err != nil { + return "" + } + return val +} + +// GetAll retrieves zero or more directives for key for the given alias. GetAll +// returns nil if no value was found, or if IgnoreErrors is false and we could +// not parse the configuration file. Use GetStrict to disambiguate the latter +// cases. +// +// The match for key is case insensitive. +func (u *UserSettings) GetAll(alias, key string) []string { + val, _ := u.GetAllStrict(alias, key) + return val +} + +// GetStrict finds the first value for key within a declaration that matches the +// alias. If key has a default value and no matching configuration is found, the +// default will be returned. For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// error will be non-nil if and only if a user's configuration file or the +// system configuration file could not be parsed, and u.IgnoreErrors is false. +func (u *UserSettings) GetStrict(alias, key string) (string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return "", u.onceErr + } + val, err := findVal(u.userConfig, alias, key) + if err != nil || val != "" { + return val, err + } + val2, err2 := findVal(u.systemConfig, alias, key) + if err2 != nil || val2 != "" { + return val2, err2 + } + return Default(key), nil +} + +// GetAllStrict retrieves zero or more directives for key for the given alias. +// If key has a default value and no matching configuration is found, the +// default will be returned. 
For more information on default values and the way +// patterns are matched, see the manpage for ssh_config. +// +// The returned error will be non-nil if and only if a user's configuration file +// or the system configuration file could not be parsed, and u.IgnoreErrors is +// false. +func (u *UserSettings) GetAllStrict(alias, key string) ([]string, error) { + u.doLoadConfigs() + //lint:ignore S1002 I prefer it this way + if u.onceErr != nil && u.IgnoreErrors == false { + return nil, u.onceErr + } + val, err := findAll(u.userConfig, alias, key) + if err != nil || val != nil { + return val, err + } + val2, err2 := findAll(u.systemConfig, alias, key) + if err2 != nil || val2 != nil { + return val2, err2 + } + // TODO: IdentityFile has multiple default values that we should return. + if def := Default(key); def != "" { + return []string{def}, nil + } + return []string{}, nil +} + +func (u *UserSettings) doLoadConfigs() { + u.loadConfigs.Do(func() { + // can't parse user file, that's ok. + var filename string + if u.userConfigFinder == nil { + filename = userConfigFinder() + } else { + filename = u.userConfigFinder() + } + var err error + u.userConfig, err = parseFile(filename) + //lint:ignore S1002 I prefer it this way + if err != nil && os.IsNotExist(err) == false { + u.onceErr = err + return + } + if u.systemConfigFinder == nil { + filename = systemConfigFinder() + } else { + filename = u.systemConfigFinder() + } + u.systemConfig, err = parseFile(filename) + //lint:ignore S1002 I prefer it this way + if err != nil && os.IsNotExist(err) == false { + u.onceErr = err + return + } + }) +} + +func parseFile(filename string) (*Config, error) { + return parseWithDepth(filename, 0) +} + +func parseWithDepth(filename string, depth uint8) (*Config, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return decodeBytes(b, isSystem(filename), depth) +} + +func isSystem(filename string) bool { + // TODO: not sure this is the best way to detect a system repo + return strings.HasPrefix(filepath.Clean(filename), "/etc/ssh") +} + +// Decode reads r into a Config, or returns an error if r could not be parsed as +// an SSH config file. +func Decode(r io.Reader) (*Config, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return decodeBytes(b, false, 0) +} + +func decodeBytes(b []byte, system bool, depth uint8) (c *Config, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if e, ok := r.(error); ok && e == ErrDepthExceeded { + err = e + return + } + err = errors.New(r.(string)) + } + }() + + c = parseSSH(lexSSH(b), system, depth) + return c, err +} + +// Config represents an SSH config file. +type Config struct { + // A list of hosts to match against. The file begins with an implicit + // "Host *" declaration matching all hosts. + Hosts []*Host + depth uint8 + position Position +} + +// Get finds the first value in the configuration that matches the alias and +// contains key. Get returns the empty string if no value was found, or if the +// Config contains an invalid conditional Include value. +// +// The match for key is case insensitive. 
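A short sketch of the lookup API above; the host alias, keywords, and config text are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	ssh_config "github.com/kevinburke/ssh_config"
)

func main() {
	cfg, err := ssh_config.Decode(strings.NewReader(`Host dev
  HostName dev.example.com
  Port 2222
  IdentityFile ~/.ssh/dev_ed25519
`))
	if err != nil {
		log.Fatal(err)
	}

	port, _ := cfg.Get("dev", "Port")              // "2222"
	files, _ := cfg.GetAll("dev", "IdentityFile")  // repeatable directive
	fmt.Println(port, files)

	// Package-level Get reads the caller's real config files and falls
	// back to defaults, e.g. "22" unless Port is set in ~/.ssh/config.
	fmt.Println(ssh_config.Get("dev", "Port"))
}
```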
+func (c *Config) Get(alias, key string) (string, error) { + lowerKey := strings.ToLower(key) + for _, host := range c.Hosts { + if !host.Matches(alias) { + continue + } + for _, node := range host.Nodes { + switch t := node.(type) { + case *Empty: + continue + case *KV: + // "keys are case insensitive" per the spec + lkey := strings.ToLower(t.Key) + if lkey == "match" { + panic("can't handle Match directives") + } + if lkey == lowerKey { + return t.Value, nil + } + case *Include: + val := t.Get(alias, key) + if val != "" { + return val, nil + } + default: + return "", fmt.Errorf("unknown Node type %v", t) + } + } + } + return "", nil +} + +// GetAll returns all values in the configuration that match the alias and +// contains key, or nil if none are present. +func (c *Config) GetAll(alias, key string) ([]string, error) { + lowerKey := strings.ToLower(key) + all := []string(nil) + for _, host := range c.Hosts { + if !host.Matches(alias) { + continue + } + for _, node := range host.Nodes { + switch t := node.(type) { + case *Empty: + continue + case *KV: + // "keys are case insensitive" per the spec + lkey := strings.ToLower(t.Key) + if lkey == "match" { + panic("can't handle Match directives") + } + if lkey == lowerKey { + all = append(all, t.Value) + } + case *Include: + val, _ := t.GetAll(alias, key) + if len(val) > 0 { + all = append(all, val...) + } + default: + return nil, fmt.Errorf("unknown Node type %v", t) + } + } + } + + return all, nil +} + +// String returns a string representation of the Config file. +func (c Config) String() string { + return marshal(c).String() +} + +func (c Config) MarshalText() ([]byte, error) { + return marshal(c).Bytes(), nil +} + +func marshal(c Config) *bytes.Buffer { + var buf bytes.Buffer + for i := range c.Hosts { + buf.WriteString(c.Hosts[i].String()) + } + return &buf +} + +// Pattern is a pattern in a Host declaration. Patterns are read-only values; +// create a new one with NewPattern(). +type Pattern struct { + str string // Its appearance in the file, not the value that gets compiled. + regex *regexp.Regexp + not bool // True if this is a negated match +} + +// String prints the string representation of the pattern. +func (p Pattern) String() string { + return p.str +} + +// Copied from regexp.go with * and ? removed. +var specialBytes = []byte(`\.+()|[]{}^$`) + +func special(b byte) bool { + return bytes.IndexByte(specialBytes, b) >= 0 +} + +// NewPattern creates a new Pattern for matching hosts. NewPattern("*") creates +// a Pattern that matches all hosts. +// +// From the manpage, a pattern consists of zero or more non-whitespace +// characters, `*' (a wildcard that matches zero or more characters), or `?' (a +// wildcard that matches exactly one character). For example, to specify a set +// of declarations for any host in the ".co.uk" set of domains, the following +// pattern could be used: +// +// Host *.co.uk +// +// The following pattern would match any host in the 192.168.0.[0-9] network range: +// +// Host 192.168.0.? +func NewPattern(s string) (*Pattern, error) { + if s == "" { + return nil, errors.New("ssh_config: empty pattern") + } + negated := false + if s[0] == '!' { + negated = true + s = s[1:] + } + var buf bytes.Buffer + buf.WriteByte('^') + for i := 0; i < len(s); i++ { + // A byte loop is correct because all metacharacters are ASCII. + switch b := s[i]; b { + case '*': + buf.WriteString(".*") + case '?': + buf.WriteString(".?") + default: + // borrowing from QuoteMeta here. 
+ if special(b) { + buf.WriteByte('\\') + } + buf.WriteByte(b) + } + } + buf.WriteByte('$') + r, err := regexp.Compile(buf.String()) + if err != nil { + return nil, err + } + return &Pattern{str: s, regex: r, not: negated}, nil +} + +// Host describes a Host directive and the keywords that follow it. +type Host struct { + // A list of host patterns that should match this host. + Patterns []*Pattern + // A Node is either a key/value pair or a comment line. + Nodes []Node + // EOLComment is the comment (if any) terminating the Host line. + EOLComment string + hasEquals bool + leadingSpace int // TODO: handle spaces vs tabs here. + // The file starts with an implicit "Host *" declaration. + implicit bool +} + +// Matches returns true if the Host matches for the given alias. For +// a description of the rules that provide a match, see the manpage for +// ssh_config. +func (h *Host) Matches(alias string) bool { + found := false + for i := range h.Patterns { + if h.Patterns[i].regex.MatchString(alias) { + if h.Patterns[i].not { + // Negated match. "A pattern entry may be negated by prefixing + // it with an exclamation mark (`!'). If a negated entry is + // matched, then the Host entry is ignored, regardless of + // whether any other patterns on the line match. Negated matches + // are therefore useful to provide exceptions for wildcard + // matches." + return false + } + found = true + } + } + return found +} + +// String prints h as it would appear in a config file. Minor tweaks may be +// present in the whitespace in the printed file. +func (h *Host) String() string { + var buf bytes.Buffer + //lint:ignore S1002 I prefer to write it this way + if h.implicit == false { + buf.WriteString(strings.Repeat(" ", int(h.leadingSpace))) + buf.WriteString("Host") + if h.hasEquals { + buf.WriteString(" = ") + } else { + buf.WriteString(" ") + } + for i, pat := range h.Patterns { + buf.WriteString(pat.String()) + if i < len(h.Patterns)-1 { + buf.WriteString(" ") + } + } + if h.EOLComment != "" { + buf.WriteString(" #") + buf.WriteString(h.EOLComment) + } + buf.WriteByte('\n') + } + for i := range h.Nodes { + buf.WriteString(h.Nodes[i].String()) + buf.WriteByte('\n') + } + return buf.String() +} + +// Node represents a line in a Config. +type Node interface { + Pos() Position + String() string +} + +// KV is a line in the config file that contains a key, a value, and possibly +// a comment. +type KV struct { + Key string + Value string + Comment string + hasEquals bool + leadingSpace int // Space before the key. TODO handle spaces vs tabs. + position Position +} + +// Pos returns k's Position. +func (k *KV) Pos() Position { + return k.position +} + +// String prints k as it was parsed in the config file. There may be slight +// changes to the whitespace between values. +func (k *KV) String() string { + if k == nil { + return "" + } + equals := " " + if k.hasEquals { + equals = " = " + } + line := fmt.Sprintf("%s%s%s%s", strings.Repeat(" ", int(k.leadingSpace)), k.Key, equals, k.Value) + if k.Comment != "" { + line += " #" + k.Comment + } + return line +} + +// Empty is a line in the config file that contains only whitespace or comments. +type Empty struct { + Comment string + leadingSpace int // TODO handle spaces vs tabs. + position Position +} + +// Pos returns e's Position. +func (e *Empty) Pos() Position { + return e.position +} + +// String prints e as it was parsed in the config file. 
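A sketch of the pattern matching above, where `*` compiles to `.*` and `?` to `.?`; the hostnames are illustrative:

```go
package main

import (
	"fmt"
	"log"

	ssh_config "github.com/kevinburke/ssh_config"
)

func main() {
	p, err := ssh_config.NewPattern("*.co.uk")
	if err != nil {
		log.Fatal(err)
	}
	// Matching goes through Host, which holds the compiled patterns.
	host := &ssh_config.Host{Patterns: []*ssh_config.Pattern{p}}
	fmt.Println(host.Matches("shop.co.uk")) // true
	fmt.Println(host.Matches("shop.co.jp")) // false
}
```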
+func (e *Empty) String() string { + if e == nil { + return "" + } + if e.Comment == "" { + return "" + } + return fmt.Sprintf("%s#%s", strings.Repeat(" ", int(e.leadingSpace)), e.Comment) +} + +// Include holds the result of an Include directive, including the config files +// that have been parsed as part of that directive. At most 5 levels of Include +// statements will be parsed. +type Include struct { + // Comment is the contents of any comment at the end of the Include + // statement. + Comment string + // an include directive can include several different files, and wildcards + directives []string + + mu sync.Mutex + // 1:1 mapping between matches and keys in files array; matches preserves + // ordering + matches []string + // actual filenames are listed here + files map[string]*Config + leadingSpace int + position Position + depth uint8 + hasEquals bool +} + +const maxRecurseDepth = 5 + +// ErrDepthExceeded is returned if too many Include directives are parsed. +// Usually this indicates a recursive loop (an Include directive pointing to the +// file it contains). +var ErrDepthExceeded = errors.New("ssh_config: max recurse depth exceeded") + +func removeDups(arr []string) []string { + // Use map to record duplicates as we find them. + encountered := make(map[string]bool, len(arr)) + result := make([]string, 0) + + for v := range arr { + //lint:ignore S1002 I prefer it this way + if encountered[arr[v]] == false { + encountered[arr[v]] = true + result = append(result, arr[v]) + } + } + return result +} + +// NewInclude creates a new Include with a list of file globs to include. +// Configuration files are parsed greedily (e.g. as soon as this function runs). +// Any error encountered while parsing nested configuration files will be +// returned. +func NewInclude(directives []string, hasEquals bool, pos Position, comment string, system bool, depth uint8) (*Include, error) { + if depth > maxRecurseDepth { + return nil, ErrDepthExceeded + } + inc := &Include{ + Comment: comment, + directives: directives, + files: make(map[string]*Config), + position: pos, + leadingSpace: pos.Col - 1, + depth: depth, + hasEquals: hasEquals, + } + // no need for inc.mu.Lock() since nothing else can access this inc + matches := make([]string, 0) + for i := range directives { + var path string + if filepath.IsAbs(directives[i]) { + path = directives[i] + } else if system { + path = filepath.Join("/etc/ssh", directives[i]) + } else { + path = filepath.Join(homedir(), ".ssh", directives[i]) + } + theseMatches, err := filepath.Glob(path) + if err != nil { + return nil, err + } + matches = append(matches, theseMatches...) + } + matches = removeDups(matches) + inc.matches = matches + for i := range matches { + config, err := parseWithDepth(matches[i], depth) + if err != nil { + return nil, err + } + inc.files[matches[i]] = config + } + return inc, nil +} + +// Pos returns the position of the Include directive in the larger file. +func (i *Include) Pos() Position { + return i.position +} + +// Get finds the first value in the Include statement matching the alias and the +// given key. 
+func (inc *Include) Get(alias, key string) string { + inc.mu.Lock() + defer inc.mu.Unlock() + // TODO: we search files in any order which is not correct + for i := range inc.matches { + cfg := inc.files[inc.matches[i]] + if cfg == nil { + panic("nil cfg") + } + val, err := cfg.Get(alias, key) + if err == nil && val != "" { + return val + } + } + return "" +} + +// GetAll finds all values in the Include statement matching the alias and the +// given key. +func (inc *Include) GetAll(alias, key string) ([]string, error) { + inc.mu.Lock() + defer inc.mu.Unlock() + var vals []string + + // TODO: we search files in any order which is not correct + for i := range inc.matches { + cfg := inc.files[inc.matches[i]] + if cfg == nil { + panic("nil cfg") + } + val, err := cfg.GetAll(alias, key) + if err == nil && len(val) != 0 { + // In theory if SupportsMultiple was false for this key we could + // stop looking here. But the caller has asked us to find all + // instances of the keyword (and could use Get() if they wanted) so + // let's keep looking. + vals = append(vals, val...) + } + } + return vals, nil +} + +// String prints out a string representation of this Include directive. Note +// included Config files are not printed as part of this representation. +func (inc *Include) String() string { + equals := " " + if inc.hasEquals { + equals = " = " + } + line := fmt.Sprintf("%sInclude%s%s", strings.Repeat(" ", int(inc.leadingSpace)), equals, strings.Join(inc.directives, " ")) + if inc.Comment != "" { + line += " #" + inc.Comment + } + return line +} + +var matchAll *Pattern + +func init() { + var err error + matchAll, err = NewPattern("*") + if err != nil { + panic(err) + } +} + +func newConfig() *Config { + return &Config{ + Hosts: []*Host{ + &Host{ + implicit: true, + Patterns: []*Pattern{matchAll}, + Nodes: make([]Node, 0), + }, + }, + depth: 0, + } +} diff --git a/vendor/github.com/kevinburke/ssh_config/lexer.go b/vendor/github.com/kevinburke/ssh_config/lexer.go new file mode 100644 index 0000000000..11680b4c74 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/lexer.go @@ -0,0 +1,240 @@ +package ssh_config + +import ( + "bytes" +) + +// Define state functions +type sshLexStateFn func() sshLexStateFn + +type sshLexer struct { + inputIdx int + input []rune // Textual source + + buffer []rune // Runes composing the current token + tokens chan token + line int + col int + endbufferLine int + endbufferCol int +} + +func (s *sshLexer) lexComment(previousState sshLexStateFn) sshLexStateFn { + return func() sshLexStateFn { + growingString := "" + for next := s.peek(); next != '\n' && next != eof; next = s.peek() { + if next == '\r' && s.follow("\r\n") { + break + } + growingString += string(next) + s.next() + } + s.emitWithValue(tokenComment, growingString) + s.skip() + return previousState + } +} + +// lex the space after an equals sign in a function +func (s *sshLexer) lexRspace() sshLexStateFn { + for { + next := s.peek() + if !isSpace(next) { + break + } + s.skip() + } + return s.lexRvalue +} + +func (s *sshLexer) lexEquals() sshLexStateFn { + for { + next := s.peek() + if next == '=' { + s.emit(tokenEquals) + s.skip() + return s.lexRspace + } + // TODO error handling here; newline eof etc. 
+ if !isSpace(next) { + break + } + s.skip() + } + return s.lexRvalue +} + +func (s *sshLexer) lexKey() sshLexStateFn { + growingString := "" + + for r := s.peek(); isKeyChar(r); r = s.peek() { + // simplified a lot here + if isSpace(r) || r == '=' { + s.emitWithValue(tokenKey, growingString) + s.skip() + return s.lexEquals + } + growingString += string(r) + s.next() + } + s.emitWithValue(tokenKey, growingString) + return s.lexEquals +} + +func (s *sshLexer) lexRvalue() sshLexStateFn { + growingString := "" + for { + next := s.peek() + switch next { + case '\r': + if s.follow("\r\n") { + s.emitWithValue(tokenString, growingString) + s.skip() + return s.lexVoid + } + case '\n': + s.emitWithValue(tokenString, growingString) + s.skip() + return s.lexVoid + case '#': + s.emitWithValue(tokenString, growingString) + s.skip() + return s.lexComment(s.lexVoid) + case eof: + s.next() + } + if next == eof { + break + } + growingString += string(next) + s.next() + } + s.emit(tokenEOF) + return nil +} + +func (s *sshLexer) read() rune { + r := s.peek() + if r == '\n' { + s.endbufferLine++ + s.endbufferCol = 1 + } else { + s.endbufferCol++ + } + s.inputIdx++ + return r +} + +func (s *sshLexer) next() rune { + r := s.read() + + if r != eof { + s.buffer = append(s.buffer, r) + } + return r +} + +func (s *sshLexer) lexVoid() sshLexStateFn { + for { + next := s.peek() + switch next { + case '#': + s.skip() + return s.lexComment(s.lexVoid) + case '\r': + fallthrough + case '\n': + s.emit(tokenEmptyLine) + s.skip() + continue + } + + if isSpace(next) { + s.skip() + } + + if isKeyStartChar(next) { + return s.lexKey + } + + // removed IsKeyStartChar and lexKey. probably will need to readd + + if next == eof { + s.next() + break + } + } + + s.emit(tokenEOF) + return nil +} + +func (s *sshLexer) ignore() { + s.buffer = make([]rune, 0) + s.line = s.endbufferLine + s.col = s.endbufferCol +} + +func (s *sshLexer) skip() { + s.next() + s.ignore() +} + +func (s *sshLexer) emit(t tokenType) { + s.emitWithValue(t, string(s.buffer)) +} + +func (s *sshLexer) emitWithValue(t tokenType, value string) { + tok := token{ + Position: Position{s.line, s.col}, + typ: t, + val: value, + } + s.tokens <- tok + s.ignore() +} + +func (s *sshLexer) peek() rune { + if s.inputIdx >= len(s.input) { + return eof + } + + r := s.input[s.inputIdx] + return r +} + +func (s *sshLexer) follow(next string) bool { + inputIdx := s.inputIdx + for _, expectedRune := range next { + if inputIdx >= len(s.input) { + return false + } + r := s.input[inputIdx] + inputIdx++ + if expectedRune != r { + return false + } + } + return true +} + +func (s *sshLexer) run() { + for state := s.lexVoid; state != nil; { + state = state() + } + close(s.tokens) +} + +func lexSSH(input []byte) chan token { + runes := bytes.Runes(input) + l := &sshLexer{ + input: runes, + tokens: make(chan token), + line: 1, + col: 1, + endbufferLine: 1, + endbufferCol: 1, + } + go l.run() + return l.tokens +} diff --git a/vendor/github.com/kevinburke/ssh_config/parser.go b/vendor/github.com/kevinburke/ssh_config/parser.go new file mode 100644 index 0000000000..36c42055f5 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/parser.go @@ -0,0 +1,191 @@ +package ssh_config + +import ( + "fmt" + "strings" +) + +type sshParser struct { + flow chan token + config *Config + tokensBuffer []token + currentTable []string + seenTableKeys []string + // /etc/ssh parser or local parser - used to find the default for relative + // filepaths in the Include directive + system bool + depth uint8 +} + 
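Since `lexSSH` and the token types are unexported, a sketch exercising the state machine above would have to live in-package; a hypothetical test (not part of this change):

```go
package ssh_config

import (
	"reflect"
	"testing"
)

// Hypothetical in-package test: "Host dev\n" lexes to a key token ("Host"),
// a string token ("dev"), then EOF (see lexKey and lexRvalue above).
func TestLexKVTokens(t *testing.T) {
	var got []tokenType
	for tok := range lexSSH([]byte("Host dev\n")) {
		got = append(got, tok.typ)
	}
	want := []tokenType{tokenKey, tokenString, tokenEOF}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}
```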
+type sshParserStateFn func() sshParserStateFn + +// Formats and panics an error message based on a token +func (p *sshParser) raiseErrorf(tok *token, msg string, args ...interface{}) { + // TODO this format is ugly + panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) +} + +func (p *sshParser) raiseError(tok *token, err error) { + if err == ErrDepthExceeded { + panic(err) + } + // TODO this format is ugly + panic(tok.Position.String() + ": " + err.Error()) +} + +func (p *sshParser) run() { + for state := p.parseStart; state != nil; { + state = state() + } +} + +func (p *sshParser) peek() *token { + if len(p.tokensBuffer) != 0 { + return &(p.tokensBuffer[0]) + } + + tok, ok := <-p.flow + if !ok { + return nil + } + p.tokensBuffer = append(p.tokensBuffer, tok) + return &tok +} + +func (p *sshParser) getToken() *token { + if len(p.tokensBuffer) != 0 { + tok := p.tokensBuffer[0] + p.tokensBuffer = p.tokensBuffer[1:] + return &tok + } + tok, ok := <-p.flow + if !ok { + return nil + } + return &tok +} + +func (p *sshParser) parseStart() sshParserStateFn { + tok := p.peek() + + // end of stream, parsing is finished + if tok == nil { + return nil + } + + switch tok.typ { + case tokenComment, tokenEmptyLine: + return p.parseComment + case tokenKey: + return p.parseKV + case tokenEOF: + return nil + default: + p.raiseErrorf(tok, fmt.Sprintf("unexpected token %q\n", tok)) + } + return nil +} + +func (p *sshParser) parseKV() sshParserStateFn { + key := p.getToken() + hasEquals := false + val := p.getToken() + if val.typ == tokenEquals { + hasEquals = true + val = p.getToken() + } + comment := "" + tok := p.peek() + if tok == nil { + tok = &token{typ: tokenEOF} + } + if tok.typ == tokenComment && tok.Position.Line == val.Position.Line { + tok = p.getToken() + comment = tok.val + } + if strings.ToLower(key.val) == "match" { + // https://github.com/kevinburke/ssh_config/issues/6 + p.raiseErrorf(val, "ssh_config: Match directive parsing is unsupported") + return nil + } + if strings.ToLower(key.val) == "host" { + strPatterns := strings.Split(val.val, " ") + patterns := make([]*Pattern, 0) + for i := range strPatterns { + if strPatterns[i] == "" { + continue + } + pat, err := NewPattern(strPatterns[i]) + if err != nil { + p.raiseErrorf(val, "Invalid host pattern: %v", err) + return nil + } + patterns = append(patterns, pat) + } + p.config.Hosts = append(p.config.Hosts, &Host{ + Patterns: patterns, + Nodes: make([]Node, 0), + EOLComment: comment, + hasEquals: hasEquals, + }) + return p.parseStart + } + lastHost := p.config.Hosts[len(p.config.Hosts)-1] + if strings.ToLower(key.val) == "include" { + inc, err := NewInclude(strings.Split(val.val, " "), hasEquals, key.Position, comment, p.system, p.depth+1) + if err == ErrDepthExceeded { + p.raiseError(val, err) + return nil + } + if err != nil { + p.raiseErrorf(val, "Error parsing Include directive: %v", err) + return nil + } + lastHost.Nodes = append(lastHost.Nodes, inc) + return p.parseStart + } + kv := &KV{ + Key: key.val, + Value: val.val, + Comment: comment, + hasEquals: hasEquals, + leadingSpace: key.Position.Col - 1, + position: key.Position, + } + lastHost.Nodes = append(lastHost.Nodes, kv) + return p.parseStart +} + +func (p *sshParser) parseComment() sshParserStateFn { + comment := p.getToken() + lastHost := p.config.Hosts[len(p.config.Hosts)-1] + lastHost.Nodes = append(lastHost.Nodes, &Empty{ + Comment: comment.val, + // account for the "#" as well + leadingSpace: comment.Position.Col - 2, + position: comment.Position, + }) + return 
p.parseStart +} + +func parseSSH(flow chan token, system bool, depth uint8) *Config { + // Ensure we consume tokens to completion even if parser exits early + defer func() { + for range flow { + } + }() + + result := newConfig() + result.position = Position{1, 1} + parser := &sshParser{ + flow: flow, + config: result, + tokensBuffer: make([]token, 0), + currentTable: make([]string, 0), + seenTableKeys: make([]string, 0), + system: system, + depth: depth, + } + parser.run() + return result +} diff --git a/test/performance/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/kevinburke/ssh_config/position.go similarity index 83% rename from test/performance/vendor/github.com/pelletier/go-toml/position.go rename to vendor/github.com/kevinburke/ssh_config/position.go index c17bff87ba..e0b5e3fb33 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/position.go +++ b/vendor/github.com/kevinburke/ssh_config/position.go @@ -1,12 +1,8 @@ -// Position support for go-toml +package ssh_config -package toml +import "fmt" -import ( - "fmt" -) - -// Position of a document element within a TOML document. +// Position of a document element within a SSH document. // // Line and Col are both 1-indexed positions for the element's line number and // column number, respectively. Values of zero or less will cause Invalid(), diff --git a/vendor/github.com/kevinburke/ssh_config/token.go b/vendor/github.com/kevinburke/ssh_config/token.go new file mode 100644 index 0000000000..a0ecbb2bb7 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/token.go @@ -0,0 +1,49 @@ +package ssh_config + +import "fmt" + +type token struct { + Position + typ tokenType + val string +} + +func (t token) String() string { + switch t.typ { + case tokenEOF: + return "EOF" + } + return fmt.Sprintf("%q", t.val) +} + +type tokenType int + +const ( + eof = -(iota + 1) +) + +const ( + tokenError tokenType = iota + tokenEOF + tokenEmptyLine + tokenComment + tokenKey + tokenEquals + tokenString +) + +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +func isKeyStartChar(r rune) bool { + return !(isSpace(r) || r == '\r' || r == '\n' || r == eof) +} + +// I'm not sure that this is correct +func isKeyChar(r rune) bool { + // Keys start with the first character that isn't whitespace or [ and end + // with the last non-whitespace character before the equals sign. Keys + // cannot contain a # character." + return !(r == '\r' || r == '\n' || r == eof || r == '=') +} diff --git a/vendor/github.com/kevinburke/ssh_config/validators.go b/vendor/github.com/kevinburke/ssh_config/validators.go new file mode 100644 index 0000000000..5977f90960 --- /dev/null +++ b/vendor/github.com/kevinburke/ssh_config/validators.go @@ -0,0 +1,186 @@ +package ssh_config + +import ( + "fmt" + "strconv" + "strings" +) + +// Default returns the default value for the given keyword, for example "22" if +// the keyword is "Port". Default returns the empty string if the keyword has no +// default, or if the keyword is unknown. Keyword matching is case-insensitive. +// +// Default values are provided by OpenSSH_7.4p1 on a Mac. +func Default(keyword string) string { + return defaults[strings.ToLower(keyword)] +} + +// Arguments where the value must be "yes" or "no" and *only* yes or no. 
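Editor's note: Default, defined just above, is a plain map lookup keyed by the lowercased keyword, so callers need not normalize case themselves. A short usage sketch, assuming the package imports as github.com/kevinburke/ssh_config:

package main

import (
	"fmt"

	ssh_config "github.com/kevinburke/ssh_config"
)

func main() {
	// Keyword matching is case-insensitive; these are equivalent.
	fmt.Println(ssh_config.Default("Port")) // "22"
	fmt.Println(ssh_config.Default("pOrT")) // "22"

	// Unknown keywords, and keywords without a default, yield "".
	fmt.Println(ssh_config.Default("NoSuchKeyword") == "") // true
}

The allow-lists that back validate continue below: first the keywords whose values must be strictly yes or no, then the unsigned-integer keywords.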
+var yesnos = map[string]bool{ + strings.ToLower("BatchMode"): true, + strings.ToLower("CanonicalizeFallbackLocal"): true, + strings.ToLower("ChallengeResponseAuthentication"): true, + strings.ToLower("CheckHostIP"): true, + strings.ToLower("ClearAllForwardings"): true, + strings.ToLower("Compression"): true, + strings.ToLower("EnableSSHKeysign"): true, + strings.ToLower("ExitOnForwardFailure"): true, + strings.ToLower("ForwardAgent"): true, + strings.ToLower("ForwardX11"): true, + strings.ToLower("ForwardX11Trusted"): true, + strings.ToLower("GatewayPorts"): true, + strings.ToLower("GSSAPIAuthentication"): true, + strings.ToLower("GSSAPIDelegateCredentials"): true, + strings.ToLower("HostbasedAuthentication"): true, + strings.ToLower("IdentitiesOnly"): true, + strings.ToLower("KbdInteractiveAuthentication"): true, + strings.ToLower("NoHostAuthenticationForLocalhost"): true, + strings.ToLower("PasswordAuthentication"): true, + strings.ToLower("PermitLocalCommand"): true, + strings.ToLower("PubkeyAuthentication"): true, + strings.ToLower("RhostsRSAAuthentication"): true, + strings.ToLower("RSAAuthentication"): true, + strings.ToLower("StreamLocalBindUnlink"): true, + strings.ToLower("TCPKeepAlive"): true, + strings.ToLower("UseKeychain"): true, + strings.ToLower("UsePrivilegedPort"): true, + strings.ToLower("VisualHostKey"): true, +} + +var uints = map[string]bool{ + strings.ToLower("CanonicalizeMaxDots"): true, + strings.ToLower("CompressionLevel"): true, // 1 to 9 + strings.ToLower("ConnectionAttempts"): true, + strings.ToLower("ConnectTimeout"): true, + strings.ToLower("NumberOfPasswordPrompts"): true, + strings.ToLower("Port"): true, + strings.ToLower("ServerAliveCountMax"): true, + strings.ToLower("ServerAliveInterval"): true, +} + +func mustBeYesOrNo(lkey string) bool { + return yesnos[lkey] +} + +func mustBeUint(lkey string) bool { + return uints[lkey] +} + +func validate(key, val string) error { + lkey := strings.ToLower(key) + if mustBeYesOrNo(lkey) && (val != "yes" && val != "no") { + return fmt.Errorf("ssh_config: value for key %q must be 'yes' or 'no', got %q", key, val) + } + if mustBeUint(lkey) { + _, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return fmt.Errorf("ssh_config: %v", err) + } + } + return nil +} + +var defaults = map[string]string{ + strings.ToLower("AddKeysToAgent"): "no", + strings.ToLower("AddressFamily"): "any", + strings.ToLower("BatchMode"): "no", + strings.ToLower("CanonicalizeFallbackLocal"): "yes", + strings.ToLower("CanonicalizeHostname"): "no", + strings.ToLower("CanonicalizeMaxDots"): "1", + strings.ToLower("ChallengeResponseAuthentication"): "yes", + strings.ToLower("CheckHostIP"): "yes", + // TODO is this still the correct cipher + strings.ToLower("Cipher"): "3des", + strings.ToLower("Ciphers"): "chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,aes128-cbc,aes192-cbc,aes256-cbc", + strings.ToLower("ClearAllForwardings"): "no", + strings.ToLower("Compression"): "no", + strings.ToLower("CompressionLevel"): "6", + strings.ToLower("ConnectionAttempts"): "1", + strings.ToLower("ControlMaster"): "no", + strings.ToLower("EnableSSHKeysign"): "no", + strings.ToLower("EscapeChar"): "~", + strings.ToLower("ExitOnForwardFailure"): "no", + strings.ToLower("FingerprintHash"): "sha256", + strings.ToLower("ForwardAgent"): "no", + strings.ToLower("ForwardX11"): "no", + strings.ToLower("ForwardX11Timeout"): "20m", + strings.ToLower("ForwardX11Trusted"): "no", + strings.ToLower("GatewayPorts"): 
"no", + strings.ToLower("GlobalKnownHostsFile"): "/etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2", + strings.ToLower("GSSAPIAuthentication"): "no", + strings.ToLower("GSSAPIDelegateCredentials"): "no", + strings.ToLower("HashKnownHosts"): "no", + strings.ToLower("HostbasedAuthentication"): "no", + + strings.ToLower("HostbasedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", + strings.ToLower("HostKeyAlgorithms"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", + // HostName has a dynamic default (the value passed at the command line). + + strings.ToLower("IdentitiesOnly"): "no", + strings.ToLower("IdentityFile"): "~/.ssh/identity", + + // IPQoS has a dynamic default based on interactive or non-interactive + // sessions. + + strings.ToLower("KbdInteractiveAuthentication"): "yes", + + strings.ToLower("KexAlgorithms"): "curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1", + strings.ToLower("LogLevel"): "INFO", + strings.ToLower("MACs"): "umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1", + + strings.ToLower("NoHostAuthenticationForLocalhost"): "no", + strings.ToLower("NumberOfPasswordPrompts"): "3", + strings.ToLower("PasswordAuthentication"): "yes", + strings.ToLower("PermitLocalCommand"): "no", + strings.ToLower("Port"): "22", + + strings.ToLower("PreferredAuthentications"): "gssapi-with-mic,hostbased,publickey,keyboard-interactive,password", + strings.ToLower("Protocol"): "2", + strings.ToLower("ProxyUseFdpass"): "no", + strings.ToLower("PubkeyAcceptedKeyTypes"): "ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,ssh-rsa", + strings.ToLower("PubkeyAuthentication"): "yes", + strings.ToLower("RekeyLimit"): "default none", + strings.ToLower("RhostsRSAAuthentication"): "no", + strings.ToLower("RSAAuthentication"): "yes", + + strings.ToLower("ServerAliveCountMax"): "3", + strings.ToLower("ServerAliveInterval"): "0", + strings.ToLower("StreamLocalBindMask"): "0177", + strings.ToLower("StreamLocalBindUnlink"): "no", + strings.ToLower("StrictHostKeyChecking"): "ask", + strings.ToLower("TCPKeepAlive"): "yes", + strings.ToLower("Tunnel"): "no", + strings.ToLower("TunnelDevice"): "any:any", + strings.ToLower("UpdateHostKeys"): "no", + strings.ToLower("UseKeychain"): "no", + strings.ToLower("UsePrivilegedPort"): "no", + + strings.ToLower("UserKnownHostsFile"): "~/.ssh/known_hosts ~/.ssh/known_hosts2", + strings.ToLower("VerifyHostKeyDNS"): "no", + strings.ToLower("VisualHostKey"): "no", + strings.ToLower("XAuthLocation"): "/usr/X11R6/bin/xauth", +} + +// these identities are used for SSH protocol 2 +var defaultProtocol2Identities = []string{ 
+ "~/.ssh/id_dsa", + "~/.ssh/id_ecdsa", + "~/.ssh/id_ed25519", + "~/.ssh/id_rsa", +} + +// these directives support multiple items that can be collected +// across multiple files +var pluralDirectives = map[string]bool{ + "CertificateFile": true, + "IdentityFile": true, + "DynamicForward": true, + "RemoteForward": true, + "SendEnv": true, + "SetEnv": true, +} + +// SupportsMultiple reports whether a directive can be specified multiple times. +func SupportsMultiple(key string) bool { + return pluralDirectives[strings.ToLower(key)] +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 0af08e65e6..a2bf06e94f 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@latest + - go install mvdan.cc/garble@v0.7.2 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7469b40fa4..63f2cd5b25 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -9,7 +9,6 @@ This package provides various compression algorithms. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) @@ -17,6 +16,17 @@ This package provides various compression algorithms. # changelog +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + * Oct 26, 2022 (v1.15.12) * zstd: Tweak decoder allocs. 
https://github.com/klauspost/compress/pull/680 diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 07265ddede..82882961a0 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -294,7 +294,6 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of } offset = 0 - cGain := 0 if d.chain < 100 { for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { @@ -322,10 +321,14 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of return } + // Minimum gain to accept a match. + cGain := 4 + // Some like it higher (CSV), some like it lower (JSON) - const baseCost = 6 + const baseCost = 3 // Base is 4 bytes at with an additional cost. // Matches must be better than this. + for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:i+minMatchLook], wPos) @@ -333,7 +336,7 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of // Calculate gain. Estimate newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) - //fmt.Println(n, "gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n])) + //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) if newGain > cGain { length = n offset = pos - i @@ -490,27 +493,103 @@ func (d *compressor) deflateLazy() { } if prevLength >= minMatchLength && s.length <= prevLength { - // Check for better match at end... + // No better match, but check for better match at end... // - // checkOff must be >=2 since we otherwise risk checking s.index - // Offset of 2 seems to yield best results. + // Skip forward a number of bytes. + // Offset of 2 seems to yield best results. 3 is sometimes better. const checkOff = 2 - prevIndex := s.index - 1 - if prevIndex+prevLength+checkOff < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength { - end = maxMatchLength - } - end += prevIndex - idx := prevIndex + prevLength - (4 - checkOff) - h := hash4(d.window[idx:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff) - if ch2 > minIndex { - length := matchLen(d.window[prevIndex:end], d.window[ch2:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 + + // Check all, except full length + if prevLength < maxMatchLength-checkOff { + prevIndex := s.index - 1 + if prevIndex+prevLength < s.maxInsertIndex { + end := lookahead + if lookahead > maxMatchLength+checkOff { + end = maxMatchLength + checkOff + } + end += prevIndex + + // Hash at match end. + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength { + prevLength = length + prevOffset = prevIndex - ch2 + + // Extend back... 
+ for i := checkOff - 1; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } else if false { + // Check one further ahead. + // Only rarely better, disabled for now. + prevIndex++ + h := hash4(d.window[prevIndex+prevLength:]) + ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { + length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) + // It seems like a pure length metric is best. + if length > prevLength+checkOff { + prevLength = length + prevOffset = prevIndex - ch2 + prevIndex-- + + // Extend back... + for i := checkOff; i >= 0; i-- { + if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { + // Emit tokens we "owe" + for j := 0; j <= i; j++ { + d.tokens.AddLiteral(d.window[prevIndex+j]) + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { + return + } + d.tokens.Reset() + } + s.index++ + if s.index < s.maxInsertIndex { + h := hash4(d.window[s.index:]) + ch := s.hashHead[h] + s.chainHead = int(ch) + s.hashPrev[s.index&windowMask] = ch + s.hashHead[h] = uint32(s.index + s.hashOffset) + } + } + break + } else { + prevLength++ + } + } + } + } + } } } } diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 93a1d15031..f3d4139ef3 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -86,11 +86,19 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { dict = dict[len(dict)-maxStatelessDict:] } + // For subsequent loops, keep shallow dict reference to avoid alloc+copy. 
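Editor's note: the inDict view declared just below is what lets StatelessDeflate stop concatenating dict+block into a fresh buffer on every iteration; after the first block, the history is simply a slice of the original input ending where the current block begins. A toy illustration of that re-slicing trick follows, with compressBlock standing in for statelessEnc and deliberately tiny sizes.

package main

import "fmt"

const maxDict = 4 // stands in for maxStatelessDict (much larger in reality)

// compressBlock stands in for statelessEnc: it only reports which bytes it
// would treat as history and which as payload.
func compressBlock(buf []byte, dictLen int) {
	fmt.Printf("dict=%q data=%q\n", buf[:dictLen], buf[dictLen:])
}

func main() {
	in := []byte("abcdefghijklmnop")
	const blockSize = 8 // assumes blockSize >= maxDict

	var inDict []byte // shallow window over the original input; never copied
	for len(in) > 0 {
		todo := in
		if len(todo) > blockSize {
			todo = todo[:blockSize]
		}
		inOrg := in
		in = in[len(todo):]

		if inDict == nil {
			compressBlock(todo, 0) // first block: no history yet
		} else {
			// History is the maxDict bytes preceding todo in the original
			// input, addressed by re-slicing instead of copying.
			compressBlock(inDict[:maxDict+len(todo)], maxDict)
		}
		if len(in) > 0 {
			inDict = inOrg[len(todo)-maxDict:]
		}
	}
}

The payoff is the same as in the vendored change: no per-block allocation and no copy of up to maxStatelessDict bytes of history.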
+ var inDict []byte + for len(in) > 0 { todo := in - if len(todo) > maxStatelessBlock-len(dict) { + if len(inDict) > 0 { + if len(todo) > maxStatelessBlock-maxStatelessDict { + todo = todo[:maxStatelessBlock-maxStatelessDict] + } + } else if len(todo) > maxStatelessBlock-len(dict) { todo = todo[:maxStatelessBlock-len(dict)] } + inOrg := in in = in[len(todo):] uncompressed := todo if len(dict) > 0 { @@ -102,7 +110,11 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { todo = combined } // Compress - statelessEnc(&dst, todo, int16(len(dict))) + if len(inDict) == 0 { + statelessEnc(&dst, todo, int16(len(dict))) + } else { + statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) + } isEof := eof && len(in) == 0 if dst.n == 0 { @@ -119,7 +131,8 @@ func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { } if len(in) > 0 { // Retain a dict if we have more - dict = todo[len(todo)-maxStatelessDict:] + inDict = inOrg[len(uncompressed)-maxStatelessDict:] + dict = nil dst.Reset() } if bw.err != nil { diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c6..dac97e58a2 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9da..e36d9742f9 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index d9223a91ef..cdc94856f2 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. 
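Editor's note: the loop rewrite in fse.compress above, which replaces the ip index with src = src[:len(src)-4] and indexes relative to len(src), is a bounds-check-elimination idiom: the compiler can prove every src[len(src)-k] access in-bounds from the loop condition alone. A minimal sketch of the idiom, with sum4 as an illustrative stand-in for the encode loops:

package main

import "fmt"

// sum4 consumes a slice four bytes at a time from the end. Shrinking the
// slice each step and indexing relative to len(src) lets the compiler drop
// the per-access bounds checks that a separate index variable would need.
func sum4(src []byte) (s int) {
	for ; len(src) >= 4; src = src[:len(src)-4] {
		v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
		s += int(v0) + int(v1) + int(v2) + int(v3)
	}
	for _, v := range src { // at most 3 leftover bytes
		s += int(v)
	}
	return s
}

func main() {
	fmt.Println(sum4([]byte{1, 2, 3, 4, 5, 6})) // 21
}

On recent compilers, go build -gcflags='-d=ssa/check_bce' reports which accesses still carry checks.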
huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - 
huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. 
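Editor's note: the comment ending above motivates the change just below: the four nodeElt fields are squashed into one uint64 so that comparing and moving tree nodes is a single 8-byte load/store rather than four field accesses. A standalone sketch of the same packing, with illustrative names (packed, makePacked) and the layout the accessors below use: count in bits 0-31, parent in 32-47, symbol in 48-55, nbBits in 56-63.

package main

import "fmt"

// packed mirrors the nodeElt layout: count | parent<<32 | symbol<<48 | nbBits<<56.
type packed uint64

func makePacked(count uint32, symbol byte) packed {
	return packed(count) | packed(symbol)<<48
}

func (e packed) count() uint32  { return uint32(e) }
func (e packed) parent() uint16 { return uint16(e >> 32) }
func (e packed) symbol() byte   { return byte(e >> 48) }
func (e packed) nbBits() uint8  { return uint8(e >> 56) }

// Setters clear the field's bits with a mask, then OR in the new value.
func (e *packed) setParent(p uint16) { *e = *e&^(0xffff<<32) | packed(p)<<32 }
func (e *packed) setNbBits(n uint8)  { *e = *e&^(0xff<<56) | packed(n)<<56 }

func main() {
	e := makePacked(1000, 'x')
	e.setParent(7)
	e.setNbBits(5)
	fmt.Println(e.count(), e.parent(), string(e.symbol()), e.nbBits()) // 1000 7 x 5
}

Compared with the old four-field struct, the comparisons in huffSort and the element moves in setMaxHeight now touch one machine word each.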
+type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2ce..c4c7ab2d1f 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // 
exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ 
R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ 
$0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) 
MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ 
loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 6b9929ddf5..2445bb4fe5 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -192,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -210,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 1f5d41bf41..f6a240970d 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -152,7 +152,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:size], in[size:] h.HeaderSize += int(size) - switch size { + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -182,7 +182,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) - switch fcsSize { + switch len(b) { case 1: h.FrameContentSize = uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 30459cd3fb..7113e69ee3 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -40,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -103,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
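Editor's note: the decoder refactor below replaces three copies of the dictionary-lookup logic with a single setDict helper, and in doing so changes the meaning of a missing ID: DictionaryID becomes a plain uint32 where zero is ambiguous (either "no dictionary" or literally dictionary zero, which zstd --patch-from emits for the source file), so only a non-zero unknown ID is rejected. A small sketch of that decision rule, with resolveDict as an illustrative stand-in for setDict:

package main

import (
	"errors"
	"fmt"
)

var errUnknownDictionary = errors.New("unknown dictionary")

// resolveDict mirrors the new setDict logic: use a registered dictionary if
// the frame names one; treat ID 0 as "no dictionary"; reject only unknown
// non-zero IDs.
func resolveDict(dicts map[uint32]string, frameID uint32) (string, error) {
	if d, ok := dicts[frameID]; ok {
		return d, nil
	}
	if frameID != 0 {
		return "", errUnknownDictionary
	}
	return "", nil // proceed without history
}

func main() {
	dicts := map[uint32]string{7: "seven"}
	for _, id := range []uint32{7, 0, 9} {
		d, err := resolveDict(dicts, id)
		fmt.Printf("id=%d dict=%q err=%v\n", id, d, err)
	}
}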
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -341,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -495,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -865,13 +851,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -953,3 +934,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69c..07a90dd7af 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. 
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithDecoderDicts(dicts ...[]byte) DOption {
 	return func(o *decoderOptions) error {
 		for _, b := range dicts {
@@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
 			if err != nil {
 				return err
 			}
-			o.dicts = append(o.dicts, *d)
+			o.dicts = append(o.dicts, d)
 		}
 		return nil
 	}
 }
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+	return func(o *decoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+		return nil
+	}
+}
+
 // WithDecoderMaxWindow allows to set a maximum window size for decodes.
 // This allows rejecting packets that will cause big memory usage.
 // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index b2725f77b5..66a95c18ef 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -21,6 +21,9 @@ type dict struct {
 
 const dictMagic = "\x37\xa4\x30\xec"
 
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
+
 // ID returns the dictionary id or 0 if d is nil.
 func (d *dict) ID() uint32 {
 	if d == nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 6015f498af..8e15be2f7f 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"math"
+	"math/bits"
 	"runtime"
 	"strings"
 )
@@ -305,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
 }
 
 // WithEncoderDict allows to register a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
 // The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithEncoderDict(dict []byte) EOption {
 	return func(o *encoderOptions) error {
 		d, err := loadDict(dict)
@@ -316,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
 		return nil
 	}
 }
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
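+//
+// To decompress frames created with this option, configure the decoder with
+// a dictionary using the same id, for example via WithDecoderDictRaw.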
+func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 6cb3b4e55f..d8e8a05bd7 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error { return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 52e5703c26..b94993a072 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -320,10 +320,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -617,10 +613,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -897,10 +889,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -1152,10 +1140,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1389,8 +1373,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1402,8 +1385,7 @@ 
error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1747,8 +1729,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1760,8 +1741,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index b1886f7c74..5ffa82f5ac 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -72,7 +72,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. diff --git a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/appveyor.yml b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/appveyor.yml index 7fcc54d564..0e6785b032 100644 --- a/vendor/github.com/maxbrunsfeld/counterfeiter/v6/appveyor.yml +++ b/vendor/github.com/maxbrunsfeld/counterfeiter/v6/appveyor.yml @@ -3,7 +3,7 @@ shallow_clone: true clone_depth: 10 clone_folder: c:\projects\counterfeiter image: Visual Studio 2019 -stack: go 1.18 +stack: go 1.20 environment: GOPATH: c:\gopath diff --git a/vendor/github.com/muesli/mango-cobra/.gitignore b/vendor/github.com/muesli/mango-cobra/.gitignore new file mode 100644 index 0000000000..84b95d92c2 --- /dev/null +++ b/vendor/github.com/muesli/mango-cobra/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +example/example diff --git a/vendor/github.com/muesli/mango-cobra/.golangci-soft.yml b/vendor/github.com/muesli/mango-cobra/.golangci-soft.yml new file mode 100644 index 0000000000..ef456e0605 --- /dev/null +++ b/vendor/github.com/muesli/mango-cobra/.golangci-soft.yml @@ -0,0 +1,47 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + # - dupl + - exhaustive + # - exhaustivestruct + - goconst + - godot + - godox + - gomnd + - gomoddirectives + - goprintffuncname + - ifshort + # - lll + - misspell + - nakedret + - nestif + - noctx + - nolintlint + - prealloc + - wrapcheck + + # disable default linters, they are already enabled in .golangci.yml + disable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck diff --git a/vendor/github.com/muesli/mango-cobra/.golangci.yml b/vendor/github.com/muesli/mango-cobra/.golangci.yml new file mode 100644 index 0000000000..a5a91d0d91 --- /dev/null +++ b/vendor/github.com/muesli/mango-cobra/.golangci.yml @@ -0,0 +1,29 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + - bodyclose + - exportloopref + - goimports + - gosec + - nilerr + - 
predeclared
+  - revive
+  - rowserrcheck
+  - sqlclosecheck
+  - tparallel
+  - unconvert
+  - unparam
+  - whitespace
diff --git a/vendor/github.com/muesli/mango-cobra/LICENSE b/vendor/github.com/muesli/mango-cobra/LICENSE
new file mode 100644
index 0000000000..6a766f8e02
--- /dev/null
+++ b/vendor/github.com/muesli/mango-cobra/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Christian Muehlhaeuser
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/muesli/mango-cobra/README.md b/vendor/github.com/muesli/mango-cobra/README.md
new file mode 100644
index 0000000000..a09a791439
--- /dev/null
+++ b/vendor/github.com/muesli/mango-cobra/README.md
@@ -0,0 +1,39 @@
+# mango-cobra
+
+[![Latest Release](https://img.shields.io/github/release/muesli/mango-cobra.svg)](https://github.com/muesli/mango-cobra/releases)
+[![Build Status](https://github.com/muesli/mango-cobra/workflows/build/badge.svg)](https://github.com/muesli/mango-cobra/actions)
+[![Go ReportCard](https://goreportcard.com/badge/muesli/mango-cobra)](https://goreportcard.com/report/muesli/mango-cobra)
+[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](https://pkg.go.dev/github.com/muesli/mango-cobra)
+
+cobra adapter for [mango](https://github.com/muesli/mango).
+
+## Example
+
+```go
+import (
+	"fmt"
+
+	mcobra "github.com/muesli/mango-cobra"
+	"github.com/muesli/roff"
+	"github.com/spf13/cobra"
+)
+
+var (
+	rootCmd = &cobra.Command{
+		Use:   "mango",
+		Short: "A man-page generator",
+	}
+)
+
+func main() {
+	manPage, err := mcobra.NewManPage(1, rootCmd)
+	if err != nil {
+		panic(err)
+	}
+
+	manPage = manPage.WithSection("Copyright", "(C) 2022 Christian Muehlhaeuser.\n"+
+		"Released under MIT license.")
+
+	fmt.Println(manPage.Build(roff.NewDocument()))
+}
+```
diff --git a/vendor/github.com/muesli/mango-cobra/mcobra.go b/vendor/github.com/muesli/mango-cobra/mcobra.go
new file mode 100644
index 0000000000..f0bd0ece15
--- /dev/null
+++ b/vendor/github.com/muesli/mango-cobra/mcobra.go
@@ -0,0 +1,61 @@
+package mcobra
+
+import (
+	"github.com/muesli/mango"
+	mpflag "github.com/muesli/mango-pflag"
+	"github.com/spf13/cobra"
+)
+
+// NewManPage creates a new mango.ManPage from a cobra.Command.
+func NewManPage(section uint, c *cobra.Command) (*mango.ManPage, error) {
+	manPage := mango.NewManPage(section, c.Name(), c.Short).
+		WithLongDescription(c.Long)
+
+	if err := AddCommand(manPage, c); err != nil {
+		return nil, err
+	}
+	return manPage, nil
+}
+
+// AddCommand adds a cobra.Command to a mango.ManPage.
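+// Hidden subcommands are skipped; flags and persistent flags are collected
+// for every visible command in the tree.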
+func AddCommand(m *mango.ManPage, c *cobra.Command) error { + return addCommandTree(m, c, nil) +} + +func addCommandTree(m *mango.ManPage, c *cobra.Command, parent *mango.Command) error { + var item *mango.Command + if parent == nil { + // set root command + item = mango.NewCommand(c.Name(), "", "") + m.Root = *item + } else { + item = mango.NewCommand(c.Name(), c.Short, c.Use) + if err := parent.AddCommand(item); err != nil { + return err + } + } + + // add commands + if c.HasSubCommands() { + for _, sub := range c.Commands() { + if sub.Hidden { + // ignore hidden commands + continue + } + + if err := addCommandTree(m, sub, item); err != nil { + return err + } + } + } + + // add flags + if c.HasFlags() { + c.Flags().VisitAll(mpflag.PFlagCommandVisitor(item)) + } + if c.HasPersistentFlags() { + c.PersistentFlags().VisitAll(mpflag.PFlagCommandVisitor(item)) + } + + return nil +} diff --git a/vendor/github.com/muesli/mango-pflag/.gitignore b/vendor/github.com/muesli/mango-pflag/.gitignore new file mode 100644 index 0000000000..84b95d92c2 --- /dev/null +++ b/vendor/github.com/muesli/mango-pflag/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +example/example diff --git a/vendor/github.com/muesli/mango-pflag/.golangci-soft.yml b/vendor/github.com/muesli/mango-pflag/.golangci-soft.yml new file mode 100644 index 0000000000..ef456e0605 --- /dev/null +++ b/vendor/github.com/muesli/mango-pflag/.golangci-soft.yml @@ -0,0 +1,47 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + # - dupl + - exhaustive + # - exhaustivestruct + - goconst + - godot + - godox + - gomnd + - gomoddirectives + - goprintffuncname + - ifshort + # - lll + - misspell + - nakedret + - nestif + - noctx + - nolintlint + - prealloc + - wrapcheck + + # disable default linters, they are already enabled in .golangci.yml + disable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck diff --git a/vendor/github.com/muesli/mango-pflag/.golangci.yml b/vendor/github.com/muesli/mango-pflag/.golangci.yml new file mode 100644 index 0000000000..a5a91d0d91 --- /dev/null +++ b/vendor/github.com/muesli/mango-pflag/.golangci.yml @@ -0,0 +1,29 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + - bodyclose + - exportloopref + - goimports + - gosec + - nilerr + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - tparallel + - unconvert + - unparam + - whitespace diff --git a/vendor/github.com/muesli/mango-pflag/LICENSE b/vendor/github.com/muesli/mango-pflag/LICENSE new file mode 100644 index 0000000000..6a766f8e02 --- /dev/null +++ b/vendor/github.com/muesli/mango-pflag/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Christian Muehlhaeuser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/muesli/mango-pflag/README.md b/vendor/github.com/muesli/mango-pflag/README.md new file mode 100644 index 0000000000..0145131306 --- /dev/null +++ b/vendor/github.com/muesli/mango-pflag/README.md @@ -0,0 +1,33 @@ +# mango-pflag + +[![Latest Release](https://img.shields.io/github/release/muesli/mango-pflag.svg)](https://github.com/muesli/mango-pflag/releases) +[![Build Status](https://github.com/muesli/mango-pflag/workflows/build/badge.svg)](https://github.com/muesli/mango-pflag/actions) +[![Go ReportCard](https://goreportcard.com/badge/muesli/mango-pflag)](https://goreportcard.com/report/muesli/mango-pflag) +[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](https://pkg.go.dev/github.com/muesli/mango-pflag) + +pflag adapter for [mango](https://github.com/muesli/mango). + +## Example + +```go +import ( + "fmt" + + "github.com/muesli/mango" + mpflag "github.com/muesli/mango-pflag" + "github.com/muesli/roff" + flag "github.com/spf13/pflag" +) + +func main() { + flag.Parse() + + manPage := mango.NewManPage(1, "mango", "a man-page generator"). + WithLongDescription("mango is a man-page generator for Go."). + WithSection("Copyright", "(C) 2022 Christian Muehlhaeuser.\n"+ + "Released under MIT license.") + + flag.VisitAll(mpflag.PFlagVisitor(manPage)) + fmt.Println(manPage.Build(roff.NewDocument())) +} +``` diff --git a/vendor/github.com/muesli/mango-pflag/mpflag.go b/vendor/github.com/muesli/mango-pflag/mpflag.go new file mode 100644 index 0000000000..644c87d658 --- /dev/null +++ b/vendor/github.com/muesli/mango-pflag/mpflag.go @@ -0,0 +1,30 @@ +package mpflag + +import ( + "github.com/muesli/mango" + "github.com/spf13/pflag" +) + +// PFlagVisitor is used to visit all flags and track them in a mango.ManPage. +func PFlagVisitor(m *mango.ManPage) func(*pflag.Flag) { + return func(f *pflag.Flag) { + _ = m.Root.AddFlag(mango.Flag{ + Name: f.Name, + Short: f.Shorthand, + Usage: f.Usage, + PFlag: true, + }) + } +} + +// PFlagCommandVisitor is used to visit all flags and track them in a mango.Command. 
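+// Errors returned by AddFlag (such as duplicate flags) are discarded.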
+func PFlagCommandVisitor(c *mango.Command) func(*pflag.Flag) { + return func(f *pflag.Flag) { + _ = c.AddFlag(mango.Flag{ + Name: f.Name, + Short: f.Shorthand, + Usage: f.Usage, + PFlag: true, + }) + } +} diff --git a/vendor/github.com/muesli/mango/.gitignore b/vendor/github.com/muesli/mango/.gitignore new file mode 100644 index 0000000000..c37d41fe25 --- /dev/null +++ b/vendor/github.com/muesli/mango/.gitignore @@ -0,0 +1,19 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +examples/cobra/cobra +examples/flag/flag +examples/pflag/pflag diff --git a/vendor/github.com/muesli/mango/.golangci-soft.yml b/vendor/github.com/muesli/mango/.golangci-soft.yml new file mode 100644 index 0000000000..ef456e0605 --- /dev/null +++ b/vendor/github.com/muesli/mango/.golangci-soft.yml @@ -0,0 +1,47 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + # - dupl + - exhaustive + # - exhaustivestruct + - goconst + - godot + - godox + - gomnd + - gomoddirectives + - goprintffuncname + - ifshort + # - lll + - misspell + - nakedret + - nestif + - noctx + - nolintlint + - prealloc + - wrapcheck + + # disable default linters, they are already enabled in .golangci.yml + disable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck diff --git a/vendor/github.com/muesli/mango/.golangci.yml b/vendor/github.com/muesli/mango/.golangci.yml new file mode 100644 index 0000000000..a5a91d0d91 --- /dev/null +++ b/vendor/github.com/muesli/mango/.golangci.yml @@ -0,0 +1,29 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + - bodyclose + - exportloopref + - goimports + - gosec + - nilerr + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - tparallel + - unconvert + - unparam + - whitespace diff --git a/vendor/github.com/muesli/mango/LICENSE b/vendor/github.com/muesli/mango/LICENSE new file mode 100644 index 0000000000..6a766f8e02 --- /dev/null +++ b/vendor/github.com/muesli/mango/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Christian Muehlhaeuser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/muesli/mango/README.md b/vendor/github.com/muesli/mango/README.md new file mode 100644 index 0000000000..5d8b5f637e --- /dev/null +++ b/vendor/github.com/muesli/mango/README.md @@ -0,0 +1,150 @@ +# mango + +[![Build Status](https://github.com/muesli/mango/workflows/build/badge.svg)](https://github.com/muesli/mango/actions) +[![Go ReportCard](https://goreportcard.com/badge/muesli/mango)](https://goreportcard.com/report/muesli/mango) +[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](https://pkg.go.dev/github.com/muesli/mango) + +mango is a man-page generator for the Go flag, pflag, cobra, and coral packages. +It extracts commands, flags, and arguments from your program and enables it to +self-document. + +## Adapters + +Currently the following adapters exist: + +- flag: support for Go's standard flag package +- [mango-cobra](https://github.com/muesli/mango-cobra): an adapter for [cobra](https://github.com/spf13/cobra) +- [mango-coral](https://github.com/muesli/mango-coral): an adapter for [coral](https://github.com/muesli/coral) +- [mango-pflag](https://github.com/muesli/mango-pflag): an adapter for the [pflag](https://github.com/spf13/pflag) package + +## Usage with flag: + +```go +import ( + "flag" + "fmt" + + "github.com/muesli/mango" + "github.com/muesli/mango/mflag" + "github.com/muesli/roff" +) + +var ( + one = flag.String("one", "", "first value") + two = flag.String("two", "", "second value") +) + +func main() { + flag.Parse() + + manPage := mango.NewManPage(1, "mango", "mango - a man-page generator"). + WithLongDescription("mango is a man-page generator for Go.\n"+ + "Features:\n"+ + "* User-friendly\n"+ + "* Plugable"). + WithSection("Copyright", "(C) 2022 Christian Muehlhaeuser.\n"+ + "Released under MIT license.") + + flag.VisitAll(mflag.FlagVisitor(manPage)) + fmt.Println(manPage.Build(roff.NewDocument())) +} +``` + +Mango will extract all the flags from your app and generate a man-page similar +to this example: + +![mango](/mango.png) + +## Usage with pflag: + +```go +import ( + "fmt" + + "github.com/muesli/mango" + mpflag "github.com/muesli/mango-pflag" + "github.com/muesli/roff" + flag "github.com/spf13/pflag" +) + +func main() { + flag.Parse() + + manPage := mango.NewManPage(1, "mango", "mango - a man-page generator"). + WithLongDescription("mango is a man-page generator for Go."). 
+ WithSection("Copyright", "(C) 2022 Christian Muehlhaeuser.\n"+ + "Released under MIT license.") + + flag.VisitAll(mpflag.PFlagVisitor(manPage)) + fmt.Println(manPage.Build(roff.NewDocument())) +} +``` + +## Usage with cobra: + +```go +import ( + "fmt" + + mcobra "github.com/muesli/mango-cobra" + "github.com/muesli/roff" + "github.com/spf13/cobra" +) + +var ( + rootCmd = &cobra.Command{ + Use: "mango", + Short: "A man-page generator", + } +) + +func main() { + manPage, err := mcobra.NewManPage(1, rootCmd) + if err != nil { + panic(err) + } + + manPage = manPage.WithSection("Copyright", "(C) 2022 Christian Muehlhaeuser.\n"+ + "Released under MIT license.") + + fmt.Println(manPage.Build(roff.NewDocument())) +} +``` + +## Usage with coral: + +```go +import ( + "fmt" + + mcoral "github.com/muesli/mango-coral" + "github.com/muesli/roff" + "github.com/muesli/coral" +) + +var ( + rootCmd = &coral.Command{ + Use: "mango", + Short: "A man-page generator", + } +) + +func main() { + manPage, err := mcoral.NewManPage(1, rootCmd) + if err != nil { + panic(err) + } + + manPage = manPage.WithSection("Copyright", "(C) 2022 Christian Muehlhaeuser.\n"+ + "Released under MIT license.") + + fmt.Println(manPage.Build(roff.NewDocument())) +} +``` + +## Feedback + +Got some feedback or suggestions? Please open an issue or drop me a note! + +* [Twitter](https://twitter.com/mueslix) +* [The Fediverse](https://mastodon.social/@fribbledom) diff --git a/vendor/github.com/muesli/mango/builder.go b/vendor/github.com/muesli/mango/builder.go new file mode 100644 index 0000000000..ae813478b2 --- /dev/null +++ b/vendor/github.com/muesli/mango/builder.go @@ -0,0 +1,21 @@ +package mango + +import ( + "time" +) + +// Builder is the interface of a man page builder. +type Builder interface { + Heading(section uint, title, description string, ts time.Time) + Paragraph() + Indent(n int) + IndentEnd() + TaggedParagraph(indentation int) + List(text string) + Section(text string) + EndSection() + Text(text string) + TextBold(text string) + TextItalic(text string) + String() string +} diff --git a/vendor/github.com/muesli/mango/mango.go b/vendor/github.com/muesli/mango/mango.go new file mode 100644 index 0000000000..fee6a04195 --- /dev/null +++ b/vendor/github.com/muesli/mango/mango.go @@ -0,0 +1,221 @@ +package mango + +import ( + "errors" + "fmt" + "sort" + "strings" + "time" +) + +// ManPage represents a man page generator. +type ManPage struct { + Root Command + sections []Section + section uint + description string + longDescription string +} + +// Command represents a command. +type Command struct { + Name string + Short string + Usage string + Flags map[string]Flag + Commands map[string]*Command +} + +// Flag represents a flag. +type Flag struct { + Name string + Short string + Usage string + PFlag bool +} + +// Section represents a section. +type Section struct { + Name string + Text string +} + +// NewManPage returns a new ManPage generator instance. +func NewManPage(section uint, title string, description string) *ManPage { + root := NewCommand(title, "", "") + return &ManPage{ + Root: *root, + section: section, + description: description, + } +} + +// NewCommand returns a new Command. +func NewCommand(name string, short string, usage string) *Command { + return &Command{ + Name: name, + Short: short, + Usage: usage, + Flags: make(map[string]Flag), + Commands: make(map[string]*Command), + } +} + +// WithLongDescription sets the long description. 
+func (m *ManPage) WithLongDescription(desc string) *ManPage {
+	m.longDescription = desc
+	return m
+}
+
+// WithSection adds a section to the man page.
+func (m *ManPage) WithSection(section string, text string) *ManPage {
+	m.sections = append(m.sections, Section{
+		Name: section,
+		Text: text,
+	})
+	return m
+}
+
+// AddFlag adds a flag to the command.
+func (m *Command) AddFlag(f Flag) error {
+	if _, found := m.Flags[f.Name]; found {
+		return errors.New("duplicate flag: " + f.Name)
+	}
+
+	m.Flags[f.Name] = f
+	return nil
+}
+
+// AddCommand adds a sub-command.
+func (m *Command) AddCommand(c *Command) error {
+	if _, found := m.Commands[c.Name]; found {
+		return errors.New("duplicate command: " + c.Name)
+	}
+
+	m.Commands[c.Name] = c
+	return nil
+}
+
+func (m ManPage) buildCommand(w Builder, c Command) {
+	if len(c.Flags) > 0 {
+		if c.Name == m.Root.Name {
+			w.Section("Options")
+			w.TaggedParagraph(-1)
+		} else {
+			w.TaggedParagraph(-1)
+			w.TextBold("OPTIONS")
+			w.Indent(4)
+		}
+		keys := make([]string, 0, len(c.Flags))
+		for k := range c.Flags {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for i, k := range keys {
+			opt := c.Flags[k]
+			if i > 0 {
+				w.TaggedParagraph(-1)
+			}
+
+			prefix := "-"
+			if opt.PFlag {
+				prefix = "--"
+			}
+
+			if opt.Short != "" {
+				w.TextBold(fmt.Sprintf("%[1]s%[2]s %[1]s%[3]s", prefix, opt.Short, opt.Name))
+			} else {
+				w.TextBold(prefix + opt.Name)
+			}
+			w.EndSection()
+
+			w.Text(strings.ReplaceAll(opt.Usage, "\n", " "))
+		}
+
+		if c.Name != m.Root.Name {
+			w.IndentEnd()
+		}
+	}
+
+	if len(c.Commands) > 0 {
+		if c.Name == m.Root.Name {
+			w.Section("Commands")
+			w.TaggedParagraph(-1)
+		} else {
+			w.TaggedParagraph(-1)
+			w.TextBold("COMMANDS")
+			w.Indent(4)
+		}
+		keys := make([]string, 0, len(c.Commands))
+		for k := range c.Commands {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+
+		for i, k := range keys {
+			opt := c.Commands[k]
+			if i > 0 {
+				w.TaggedParagraph(-1)
+			}
+
+			w.TextBold(opt.Name)
+			if opt.Usage != "" {
+				w.Text(strings.TrimPrefix(opt.Usage, opt.Name))
+			}
+			w.Indent(4)
+			w.Text(strings.ReplaceAll(opt.Short, "\n", " "))
+			w.IndentEnd()
+
+			m.buildCommand(w, *opt)
+		}
+
+		if c.Name != m.Root.Name {
+			w.IndentEnd()
+		}
+	}
+}
+
+// Build generates the man page.
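+// It emits the Name, Synopsis and Description sections first, followed by
+// the root command's options and subcommands and any sections added via
+// WithSection.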
+func (m ManPage) Build(w Builder) string { + /* + Common order: + - name + - synopsis + - description + - options + - exit status + - usage + - notes + - authors + - copyright + - see also + */ + + w.Heading(m.section, m.Root.Name, m.description, time.Now()) + + w.Section("Name") + w.Text(m.Root.Name + " - " + m.description) + + w.Section("Synopsis") + w.TextBold(m.Root.Name) + w.Text(" [") + w.TextItalic("options...") + w.Text("] [") + w.TextItalic("argument...") + w.Text("]") + + w.Section("Description") + w.Text(m.longDescription) + + m.buildCommand(w, m.Root) + + for _, v := range m.sections { + w.Section(v.Name) + w.Text(v.Text) + } + + return w.String() +} diff --git a/vendor/github.com/muesli/mango/mango.png b/vendor/github.com/muesli/mango/mango.png new file mode 100644 index 0000000000..a521ccd711 Binary files /dev/null and b/vendor/github.com/muesli/mango/mango.png differ diff --git a/vendor/github.com/muesli/roff/.gitignore b/vendor/github.com/muesli/roff/.gitignore new file mode 100644 index 0000000000..66fd13c903 --- /dev/null +++ b/vendor/github.com/muesli/roff/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/github.com/muesli/roff/.golangci-soft.yml b/vendor/github.com/muesli/roff/.golangci-soft.yml new file mode 100644 index 0000000000..ef456e0605 --- /dev/null +++ b/vendor/github.com/muesli/roff/.golangci-soft.yml @@ -0,0 +1,47 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + # - dupl + - exhaustive + # - exhaustivestruct + - goconst + - godot + - godox + - gomnd + - gomoddirectives + - goprintffuncname + - ifshort + # - lll + - misspell + - nakedret + - nestif + - noctx + - nolintlint + - prealloc + - wrapcheck + + # disable default linters, they are already enabled in .golangci.yml + disable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck diff --git a/vendor/github.com/muesli/roff/.golangci.yml b/vendor/github.com/muesli/roff/.golangci.yml new file mode 100644 index 0000000000..a5a91d0d91 --- /dev/null +++ b/vendor/github.com/muesli/roff/.golangci.yml @@ -0,0 +1,29 @@ +run: + tests: false + +issues: + include: + - EXC0001 + - EXC0005 + - EXC0011 + - EXC0012 + - EXC0013 + + max-issues-per-linter: 0 + max-same-issues: 0 + +linters: + enable: + - bodyclose + - exportloopref + - goimports + - gosec + - nilerr + - predeclared + - revive + - rowserrcheck + - sqlclosecheck + - tparallel + - unconvert + - unparam + - whitespace diff --git a/vendor/github.com/muesli/roff/LICENSE b/vendor/github.com/muesli/roff/LICENSE new file mode 100644 index 0000000000..6a766f8e02 --- /dev/null +++ b/vendor/github.com/muesli/roff/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Christian Muehlhaeuser + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to 
do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/muesli/roff/README.md b/vendor/github.com/muesli/roff/README.md new file mode 100644 index 0000000000..3a44e665ca --- /dev/null +++ b/vendor/github.com/muesli/roff/README.md @@ -0,0 +1,65 @@ +# roff + +[![Latest Release](https://img.shields.io/github/release/muesli/roff.svg)](https://github.com/muesli/roff/releases) +[![Build Status](https://github.com/muesli/roff/workflows/build/badge.svg)](https://github.com/muesli/roff/actions) +[![Coverage Status](https://coveralls.io/repos/github/muesli/roff/badge.svg?branch=main)](https://coveralls.io/github/muesli/roff?branch=main) +[![Go ReportCard](https://goreportcard.com/badge/muesli/roff)](https://goreportcard.com/report/muesli/roff) +[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](https://pkg.go.dev/github.com/muesli/roff) + +roff lets you write roff documents in Go + +## Tutorial + +Import the library: + +```go +import "github.com/muesli/roff" +``` + +Then start a new roff document and write to it: + +```go +doc := roff.NewDocument() +doc.Heading(1, "Title", "A short description", time.Now()) + +// a section of text +doc.Section("Introduction") +doc.Text("Here is a quick introduction to writing roff documents with Go!") + +// fonts +doc.Section("Fonts") +doc.Text("This is a text with a bold font: ") +doc.TextBold("I am bold!") +doc.Paragraph() +doc.Text("This is a text with an italic font: ") +doc.TextItalic("I am italic!") + +// indentation +doc.Section("Indentation") +doc.Text("This block of text is left-aligned to the section.") +doc.Indent(4) +doc.Text("This block of text is totally indented.") +doc.IndentEnd() +doc.Text("... left-aligned again!") + +// lists +doc.Section("Lists") +doc.Text("A list:") +doc.Paragraph() +doc.Indent(4) +doc.List("First list item") +doc.List("Second list item") +``` + +Fetch the roff document as a string and you're done: + +```go +fmt.Println(doc.String()) +``` + +## Feedback + +Got some feedback or suggestions? Please open an issue or drop me a note! 
+
+* [Twitter](https://twitter.com/mueslix)
+* [The Fediverse](https://mastodon.social/@fribbledom)
diff --git a/vendor/github.com/muesli/roff/roff.go b/vendor/github.com/muesli/roff/roff.go
new file mode 100644
index 0000000000..4f6799004d
--- /dev/null
+++ b/vendor/github.com/muesli/roff/roff.go
@@ -0,0 +1,163 @@
+package roff
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"time"
+)
+
+const (
+	// Title heading (Document structure macro)
+	TitleHeading = `.TH %[1]s %[2]d "%[4]s" "%[3]s" "%[5]s"`
+	// Paragraph macro
+	Paragraph = "\n.PP"
+	// Relative-indent start (Document structure macro)
+	Indent = "\n.RS"
+	// Relative-indent end (Document structure macro)
+	IndentEnd = "\n.RE"
+	// Indented paragraph
+	IndentedParagraph = "\n.IP"
+	// Section heading (Document structure macro)
+	SectionHeading = "\n.SH %s"
+	// Tagged paragraph
+	TaggedParagraph = "\n.TP"
+
+	// Bold escape
+	Bold = `\fB`
+	// Italic escape
+	Italic = `\fI`
+	// Return to previous font setting
+	PreviousFont = `\fP`
+)
+
+// Document is a roff document.
+type Document struct {
+	buffer bytes.Buffer
+}
+
+// NewDocument returns a new roff Document.
+func NewDocument() *Document {
+	return &Document{}
+}
+
+// writef writes the given text to the internal buffer. Following the roff docs,
+// we prevent empty lines in its output, as that may mysteriously break some
+// roff renderers.
+func (tr *Document) writef(format string, args ...interface{}) {
+	if bytes.HasSuffix(tr.buffer.Bytes(), []byte("\n")) &&
+		strings.HasPrefix(format, "\n") {
+		// prevent empty lines in output
+		format = strings.TrimPrefix(format, "\n")
+	}
+
+	fmt.Fprintf(&tr.buffer, format, args...)
+}
+
+func (tr *Document) writelnf(format string, args ...interface{}) {
+	tr.writef(format+"\n", args...)
+}
+
+// Heading writes the title heading of the document.
+func (tr *Document) Heading(section uint, title, description string, ts time.Time) {
+	tr.writef(TitleHeading, strings.ToUpper(title), section, title, ts.Format("2006-01-02"), description)
+}
+
+// Paragraph starts a new paragraph.
+func (tr *Document) Paragraph() {
+	tr.writelnf(Paragraph)
+}
+
+// Indent increases the indentation level.
+func (tr *Document) Indent(n int) {
+	if n >= 0 {
+		tr.writelnf(Indent+" %d", n)
+	} else {
+		tr.writelnf(Indent)
+	}
+}
+
+// IndentEnd decreases the indentation level.
+func (tr *Document) IndentEnd() {
+	tr.writelnf(IndentEnd)
+}
+
+// TaggedParagraph starts a new tagged paragraph.
+func (tr *Document) TaggedParagraph(indentation int) {
+	if indentation >= 0 {
+		tr.writelnf(TaggedParagraph+" %d", indentation)
+	} else {
+		tr.writelnf(TaggedParagraph)
+	}
+}
+
+// List writes a list item.
+func (tr *Document) List(text string) {
+	tr.writelnf(IndentedParagraph+" \\(bu 3\n%s", escapeText(strings.TrimSpace(text)))
+}
+
+// Section writes a section heading.
+func (tr *Document) Section(text string) {
+	tr.writelnf(SectionHeading, strings.ToUpper(text))
+}
+
+// EndSection ends the current section.
+func (tr *Document) EndSection() {
+	tr.writelnf("")
+}
+
+// Text writes text.
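+// Newlines start a new paragraph, and lines beginning with "*" are rendered
+// as items of an indented list.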
+func (tr *Document) Text(text string) { + inList := false + for i, s := range strings.Split(text, "\n") { + if i > 0 && !inList { + // start a new paragraph if we're not in a list + tr.Paragraph() + } + + if strings.HasPrefix(s, "*") { + // list item + if !inList { + // start a new indented list if we're not in one + tr.Indent(-1) + inList = true + } + + tr.List(s[1:]) + } else { + // regular text + if inList { + // end the list if we're in one + tr.IndentEnd() + inList = false + } + + tr.writef(escapeText(s)) + } + } +} + +// TextBold writes text in bold. +func (tr *Document) TextBold(text string) { + tr.writef(Bold) + tr.Text(text) + tr.writef(PreviousFont) +} + +// TextItalic writes text in italic. +func (tr *Document) TextItalic(text string) { + tr.writef(Italic) + tr.Text(text) + tr.writef(PreviousFont) +} + +// String returns the roff document as a string. +func (tr Document) String() string { + return tr.buffer.String() +} + +func escapeText(s string) string { + s = strings.ReplaceAll(s, `\`, `\e`) + s = strings.ReplaceAll(s, ".", "\\&.") + return s +} diff --git a/vendor/github.com/pelletier/go-toml/.dockerignore b/vendor/github.com/pelletier/go-toml/.dockerignore deleted file mode 100644 index 7b5883475d..0000000000 --- a/vendor/github.com/pelletier/go-toml/.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -cmd/tomll/tomll -cmd/tomljson/tomljson diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore deleted file mode 100644 index e6ba63a5c5..0000000000 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test_program/test_program_bin -fuzz/ -cmd/tomll/tomll -cmd/tomljson/tomljson -cmd/tomltestgen/tomltestgen diff --git a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md b/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md deleted file mode 100644 index 98b9893d37..0000000000 --- a/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md +++ /dev/null @@ -1,132 +0,0 @@ -## Contributing - -Thank you for your interest in go-toml! We appreciate you considering -contributing to go-toml! - -The main goal is the project is to provide an easy-to-use TOML -implementation for Go that gets the job done and gets out of your way – -dealing with TOML is probably not the central piece of your project. - -As the single maintainer of go-toml, time is scarce. All help, big or -small, is more than welcomed! - -### Ask questions - -Any question you may have, somebody else might have it too. Always feel -free to ask them on the [issues tracker][issues-tracker]. We will try to -answer them as clearly and quickly as possible, time permitting. - -Asking questions also helps us identify areas where the documentation needs -improvement, or new features that weren't envisioned before. Sometimes, a -seemingly innocent question leads to the fix of a bug. Don't hesitate and -ask away! - -### Improve the documentation - -The best way to share your knowledge and experience with go-toml is to -improve the documentation. Fix a typo, clarify an interface, add an -example, anything goes! - -The documentation is present in the [README][readme] and thorough the -source code. On release, it gets updated on [pkg.go.dev][pkg.go.dev]. To make a -change to the documentation, create a pull request with your proposed -changes. For simple changes like that, the easiest way to go is probably -the "Fork this project and edit the file" button on Github, displayed at -the top right of the file. 
Unless it's a trivial change (for example a -typo), provide a little bit of context in your pull request description or -commit message. - -### Report a bug - -Found a bug! Sorry to hear that :(. Help us and other track them down and -fix by reporting it. [File a new bug report][bug-report] on the [issues -tracker][issues-tracker]. The template should provide enough guidance on -what to include. When in doubt: add more details! By reducing ambiguity and -providing more information, it decreases back and forth and saves everyone -time. - -### Code changes - -Want to contribute a patch? Very happy to hear that! - -First, some high-level rules: - -* A short proposal with some POC code is better than a lengthy piece of - text with no code. Code speaks louder than words. -* No backward-incompatible patch will be accepted unless discussed. - Sometimes it's hard, and Go's lack of versioning by default does not - help, but we try not to break people's programs unless we absolutely have - to. -* If you are writing a new feature or extending an existing one, make sure - to write some documentation. -* Bug fixes need to be accompanied with regression tests. -* New code needs to be tested. -* Your commit messages need to explain why the change is needed, even if - already included in the PR description. - -It does sound like a lot, but those best practices are here to save time -overall and continuously improve the quality of the project, which is -something everyone benefits from. - -#### Get started - -The fairly standard code contribution process looks like that: - -1. [Fork the project][fork]. -2. Make your changes, commit on any branch you like. -3. [Open up a pull request][pull-request] -4. Review, potential ask for changes. -5. Merge. You're in! - -Feel free to ask for help! You can create draft pull requests to gather -some early feedback! - -#### Run the tests - -You can run tests for go-toml using Go's test tool: `go test ./...`. -When creating a pull requests, all tests will be ran on Linux on a few Go -versions (Travis CI), and on Windows using the latest Go version -(AppVeyor). - -#### Style - -Try to look around and follow the same format and structure as the rest of -the code. We enforce using `go fmt` on the whole code base. - ---- - -### Maintainers-only - -#### Merge pull request - -Checklist: - -* Passing CI. -* Does not introduce backward-incompatible changes (unless discussed). -* Has relevant doc changes. -* Has relevant unit tests. - -1. Merge using "squash and merge". -2. Make sure to edit the commit message to keep all the useful information - nice and clean. -3. Make sure the commit title is clear and contains the PR number (#123). - -#### New release - -1. Go to [releases][releases]. Click on "X commits to master since this - release". -2. Make note of all the changes. Look for backward incompatible changes, - new features, and bug fixes. -3. Pick the new version using the above and semver. -4. Create a [new release][new-release]. -5. Follow the same format as [1.1.0][release-110]. 
- -[issues-tracker]: https://github.com/pelletier/go-toml/issues -[bug-report]: https://github.com/pelletier/go-toml/issues/new?template=bug_report.md -[pkg.go.dev]: https://pkg.go.dev/github.com/pelletier/go-toml -[readme]: ./README.md -[fork]: https://help.github.com/articles/fork-a-repo -[pull-request]: https://help.github.com/en/articles/creating-a-pull-request -[releases]: https://github.com/pelletier/go-toml/releases -[new-release]: https://github.com/pelletier/go-toml/releases/new -[release-110]: https://github.com/pelletier/go-toml/releases/tag/v1.1.0 diff --git a/vendor/github.com/pelletier/go-toml/Dockerfile b/vendor/github.com/pelletier/go-toml/Dockerfile deleted file mode 100644 index fffdb01666..0000000000 --- a/vendor/github.com/pelletier/go-toml/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM golang:1.12-alpine3.9 as builder -WORKDIR /go/src/github.com/pelletier/go-toml -COPY . . -ENV CGO_ENABLED=0 -ENV GOOS=linux -RUN go install ./... - -FROM scratch -COPY --from=builder /go/bin/tomll /usr/bin/tomll -COPY --from=builder /go/bin/tomljson /usr/bin/tomljson -COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml diff --git a/vendor/github.com/pelletier/go-toml/Makefile b/vendor/github.com/pelletier/go-toml/Makefile deleted file mode 100644 index 9e4503aea6..0000000000 --- a/vendor/github.com/pelletier/go-toml/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -export CGO_ENABLED=0 -go := go -go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1) -go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2) - -out.tools := tomll tomljson jsontoml -out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz) -sources := $(wildcard **/*.go) - - -.PHONY: -tools: $(out.tools) - -$(out.tools): $(sources) - GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@ - -.PHONY: -dist: $(out.dist) - -$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: % - if [ "$(go.goos)" = "windows" ]; then \ - tar -cJf $@ $^.exe; \ - else \ - tar -cJf $@ $^; \ - fi - -.PHONY: -clean: - rm -rf $(out.tools) $(out.dist) diff --git a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 041cdc4a2f..0000000000 --- a/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,5 +0,0 @@ -**Issue:** add link to pelletier/go-toml issue here - -Explanation of what this pull request does. - -More detailed description of the decisions being made and the reasons why (if the patch is non-trivial). diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md deleted file mode 100644 index 7399e04bf6..0000000000 --- a/vendor/github.com/pelletier/go-toml/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# go-toml - -Go library for the [TOML](https://toml.io/) format. 
- -This library supports TOML version -[v1.0.0-rc.3](https://toml.io/en/v1.0.0-rc.3) - -[![Go Reference](https://pkg.go.dev/badge/github.com/pelletier/go-toml.svg)](https://pkg.go.dev/github.com/pelletier/go-toml) -[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) -[![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master) -[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) -[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) - - -## Development status - -**ℹ️ Consider go-toml v2!** - -The next version of go-toml is in [active development][v2-dev], and -[nearing completion][v2-map]. - -Though technically in beta, v2 is already more tested, [fixes bugs][v1-bugs], -and [much faster][v2-bench]. If you only need reading and writing TOML documents -(majority of cases), those features are implemented and the API unlikely to -change. - -The remaining features will be added shortly. While pull-requests are welcome on -v1, no active development is expected on it. When v2.0.0 is released, v1 will be -deprecated. - -👉 [go-toml v2][v2] - -[v2]: https://github.com/pelletier/go-toml/tree/v2 -[v2-map]: https://github.com/pelletier/go-toml/discussions/506 -[v2-dev]: https://github.com/pelletier/go-toml/tree/v2 -[v1-bugs]: https://github.com/pelletier/go-toml/issues?q=is%3Aissue+is%3Aopen+label%3Av2-fixed -[v2-bench]: https://github.com/pelletier/go-toml/tree/v2#benchmarks - -## Features - -Go-toml provides the following features for using data parsed from TOML documents: - -* Load TOML documents from files and string data -* Easily navigate TOML structure using Tree -* Marshaling and unmarshaling to and from data structures -* Line & column position data for all parsed elements -* [Query support similar to JSON-Path](query/) -* Syntax errors contain line and column numbers - -## Import - -```go -import "github.com/pelletier/go-toml" -``` - -## Usage example - -Read a TOML document: - -```go -config, _ := toml.Load(` -[postgres] -user = "pelletier" -password = "mypassword"`) -// retrieve data directly -user := config.Get("postgres.user").(string) - -// or using an intermediate object -postgresConfig := config.Get("postgres").(*toml.Tree) -password := postgresConfig.Get("password").(string) -``` - -Or use Unmarshal: - -```go -type Postgres struct { - User string - Password string -} -type Config struct { - Postgres Postgres -} - -doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) - -config := Config{} -toml.Unmarshal(doc, &config) -fmt.Println("user=", config.Postgres.User) -``` - -Or use a query: - -```go -// use a query to gather elements without walking the tree -q, _ := query.Compile("$..[user,password]") -results := q.Execute(config) -for ii, item := range results.Values() { - fmt.Printf("Query result %d: %v\n", ii, item) -} -``` - -## Documentation - -The documentation and additional examples are available at -[pkg.go.dev](https://pkg.go.dev/github.com/pelletier/go-toml). 
- -## Tools - -Go-toml provides three handy command line tools: - -* `tomll`: Reads TOML files and lints them. - - ``` - go install github.com/pelletier/go-toml/cmd/tomll - tomll --help - ``` -* `tomljson`: Reads a TOML file and outputs its JSON representation. - - ``` - go install github.com/pelletier/go-toml/cmd/tomljson - tomljson --help - ``` - - * `jsontoml`: Reads a JSON file and outputs a TOML representation. - - ``` - go install github.com/pelletier/go-toml/cmd/jsontoml - jsontoml --help - ``` - -### Docker image - -Those tools are also available as a Docker image from -[dockerhub](https://hub.docker.com/r/pelletier/go-toml). For example, to -use `tomljson`: - -``` -docker run -v $PWD:/workdir pelletier/go-toml tomljson /workdir/example.toml -``` - -Only master (`latest`) and tagged versions are published to dockerhub. You -can build your own image as usual: - -``` -docker build -t go-toml . -``` - -## Contribute - -Feel free to report bugs and patches using GitHub's pull requests system on -[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be -much appreciated! - -### Run tests - -`go test ./...` - -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - -## License - -The MIT License (MIT) + Apache 2.0. Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md deleted file mode 100644 index b2f21cfc92..0000000000 --- a/vendor/github.com/pelletier/go-toml/SECURITY.md +++ /dev/null @@ -1,19 +0,0 @@ -# Security Policy - -## Supported Versions - -Use this section to tell people about which versions of your project are -currently being supported with security updates. - -| Version | Supported | -| ---------- | ------------------ | -| Latest 2.x | :white_check_mark: | -| All 1.x | :x: | -| All 0.x | :x: | - -## Reporting a Vulnerability - -Email a vulnerability report to `security@pelletier.codes`. Make sure to include -as many details as possible to reproduce the vulnerability. This is a -side-project: I will try to get back to you as quickly as possible, time -permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml b/vendor/github.com/pelletier/go-toml/azure-pipelines.yml deleted file mode 100644 index 4af198b4d9..0000000000 --- a/vendor/github.com/pelletier/go-toml/azure-pipelines.yml +++ /dev/null @@ -1,188 +0,0 @@ -trigger: -- master - -stages: -- stage: run_checks - displayName: "Check" - dependsOn: [] - jobs: - - job: fmt - displayName: "fmt" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "go fmt ./..." - inputs: - command: 'custom' - customCommand: 'fmt' - arguments: './...' 
- - job: coverage - displayName: "coverage" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - task: Go@0 - displayName: "Generate coverage" - inputs: - command: 'test' - arguments: "-race -coverprofile=coverage.txt -covermode=atomic" - - task: Bash@3 - inputs: - targetType: 'inline' - script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}' - env: - CODECOV_TOKEN: $(CODECOV_TOKEN) - - job: benchmark - displayName: "benchmark" - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go 1.16" - inputs: - version: "1.16" - - script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/" - - task: Bash@3 - inputs: - filePath: './benchmark.sh' - arguments: "master $(Build.Repository.Uri)" - - - job: go_unit_tests - displayName: "unit tests" - strategy: - matrix: - linux 1.16: - goVersion: '1.16' - imageName: 'ubuntu-latest' - mac 1.16: - goVersion: '1.16' - imageName: 'macOS-latest' - windows 1.16: - goVersion: '1.16' - imageName: 'windows-latest' - linux 1.15: - goVersion: '1.15' - imageName: 'ubuntu-latest' - mac 1.15: - goVersion: '1.15' - imageName: 'macOS-latest' - windows 1.15: - goVersion: '1.15' - imageName: 'windows-latest' - pool: - vmImage: $(imageName) - steps: - - task: GoTool@0 - displayName: "Install Go $(goVersion)" - inputs: - version: $(goVersion) - - task: Go@0 - displayName: "go test ./..." - inputs: - command: 'test' - arguments: './...' -- stage: build_binaries - displayName: "Build binaries" - dependsOn: run_checks - jobs: - - job: build_binary - displayName: "Build binary" - strategy: - matrix: - linux_amd64: - GOOS: linux - GOARCH: amd64 - darwin_amd64: - GOOS: darwin - GOARCH: amd64 - windows_amd64: - GOOS: windows - GOARCH: amd64 - pool: - vmImage: ubuntu-latest - steps: - - task: GoTool@0 - displayName: "Install Go" - inputs: - version: 1.16 - - task: Bash@3 - inputs: - targetType: inline - script: "make dist" - env: - go.goos: $(GOOS) - go.goarch: $(GOARCH) - - task: CopyFiles@2 - inputs: - sourceFolder: '$(Build.SourcesDirectory)' - contents: '*.tar.xz' - TargetFolder: '$(Build.ArtifactStagingDirectory)' - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: binaries -- stage: build_binaries_manifest - displayName: "Build binaries manifest" - dependsOn: build_binaries - jobs: - - job: build_manifest - displayName: "Build binaries manifest" - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: 'current' - downloadType: 'single' - artifactName: 'binaries' - downloadPath: '$(Build.SourcesDirectory)' - - task: Bash@3 - inputs: - targetType: inline - script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt" - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: manifest - -- stage: build_docker_image - displayName: "Build Docker image" - dependsOn: run_checks - jobs: - - job: build - displayName: "Build" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - command: 'build' - Dockerfile: 'Dockerfile' - buildContext: '.' 
- addPipelineData: false - -- stage: publish_docker_image - displayName: "Publish Docker image" - dependsOn: build_docker_image - condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master')) - jobs: - - job: publish - displayName: "Publish" - pool: - vmImage: ubuntu-latest - steps: - - task: Docker@2 - inputs: - containerRegistry: 'DockerHub' - repository: 'pelletier/go-toml' - command: 'buildAndPush' - Dockerfile: 'Dockerfile' - buildContext: '.' - tags: 'latest' diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh deleted file mode 100644 index a69d3040fa..0000000000 --- a/vendor/github.com/pelletier/go-toml/benchmark.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -set -ex - -reference_ref=${1:-master} -reference_git=${2:-.} - -if ! `hash benchstat 2>/dev/null`; then - echo "Installing benchstat" - go get golang.org/x/perf/cmd/benchstat -fi - -tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` -ref_tempdir="${tempdir}/ref" -ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt" -local_benchmark="`pwd`/benchmark-local.txt" - -echo "=== ${reference_ref} (${ref_tempdir})" -git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null -pushd ${ref_tempdir} >/dev/null -git checkout ${reference_ref} >/dev/null 2>/dev/null -go test -bench=. -benchmem | tee ${ref_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${ref_benchmark} -popd >/dev/null - -echo "" -echo "=== local" -go test -bench=. -benchmem | tee ${local_benchmark} -cd benchmark -go test -bench=. -benchmem | tee -a ${local_benchmark} - -echo "" -echo "=== diff" -benchstat -delta-test=none ${ref_benchmark} ${local_benchmark} diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index a1406a32b3..0000000000 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml deleted file mode 100644 index 780d9c68f2..0000000000 --- a/vendor/github.com/pelletier/go-toml/example-crlf.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. 
- [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml deleted file mode 100644 index f45bf88b8f..0000000000 --- a/vendor/github.com/pelletier/go-toml/example.toml +++ /dev/null @@ -1,30 +0,0 @@ -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it -score = 4e-08 # to make sure leading zeroes in exponent parts of floats are supported \ No newline at end of file diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d35..0000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100644 index 3204b4c446..0000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index e091500b24..0000000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,112 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "errors" - "fmt" -) - -// Convert the bare key group string to an array. -// The input supports double quotation and single quotation, -// but escape sequences are not supported. Lexers must unescape them beforehand. 
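The key grammar handled by the function below is easiest to see with a few concrete inputs. A sketch, assuming it lives in a `_test.go` file inside package `toml` (required, since `parseKey` is unexported):

```go
package toml

import (
	"reflect"
	"testing"
)

// Illustrates the bare, quoted, and dotted key forms parseKey accepts.
func TestParseKeyForms(t *testing.T) {
	cases := []struct {
		in   string
		want []string
	}{
		{"a.b", []string{"a", "b"}},        // bare keys split on dots
		{`"a.b".c`, []string{"a.b", "c"}},  // a quoted part keeps its dot
		{"'literal'", []string{"literal"}}, // single quotes, no escapes
		{"a . b", []string{"a", "b"}},      // whitespace around dots is skipped
	}
	for _, c := range cases {
		got, err := parseKey(c.in)
		if err != nil {
			t.Fatalf("parseKey(%q): %v", c.in, err)
		}
		if !reflect.DeepEqual(got, c.want) {
			t.Fatalf("parseKey(%q) = %v, want %v", c.in, got, c.want)
		}
	}
}
```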
-func parseKey(key string) ([]string, error) { - runes := []rune(key) - var groups []string - - if len(key) == 0 { - return nil, errors.New("empty key") - } - - idx := 0 - for idx < len(runes) { - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip leading whitespace - } - if idx >= len(runes) { - break - } - r := runes[idx] - if isValidBareChar(r) { - // parse bare key - startIdx := idx - endIdx := -1 - idx++ - for idx < len(runes) { - r = runes[idx] - if isValidBareChar(r) { - idx++ - } else if r == '.' { - endIdx = idx - break - } else if isSpace(r) { - endIdx = idx - for ; idx < len(runes) && isSpace(runes[idx]); idx++ { - // skip trailing whitespace - } - if idx < len(runes) && runes[idx] != '.' { - return nil, fmt.Errorf("invalid key character after whitespace: %c", runes[idx]) - } - break - } else { - return nil, fmt.Errorf("invalid bare key character: %c", r) - } - } - if endIdx == -1 { - endIdx = idx - } - groups = append(groups, string(runes[startIdx:endIdx])) - } else if r == '\'' { - // parse single quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed single-quoted key") - } - r = runes[idx] - if r == '\'' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '"' { - // parse double quoted key - idx++ - startIdx := idx - for { - if idx >= len(runes) { - return nil, fmt.Errorf("unclosed double-quoted key") - } - r = runes[idx] - if r == '"' { - groups = append(groups, string(runes[startIdx:idx])) - idx++ - break - } - idx++ - } - } else if r == '.' { - idx++ - if idx >= len(runes) { - return nil, fmt.Errorf("unexpected end of key") - } - r = runes[idx] - if !isValidBareChar(r) && r != '\'' && r != '"' && r != ' ' { - return nil, fmt.Errorf("expecting key part after dot") - } - } else { - return nil, fmt.Errorf("invalid key character: %c", r) - } - } - if len(groups) == 0 { - return nil, fmt.Errorf("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || isDigit(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index 313908e3e0..0000000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,1031 +0,0 @@ -// TOML lexer. 
-// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" -) - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - brackets []rune - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '}': // after '{' - return l.lexRightCurlyBrace - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - return l.lexLeftBracket - case ']': - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '[' { - return l.lexRvalue - } - return l.lexVoid - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - 
} - - if next == eof { - l.next() - break - } - - if next == '+' || next == '-' { - return l.lexNumber - } - - if isDigit(next) { - return l.lexDateTimeOrNumber - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexDateTimeOrNumber() tomlLexStateFn { - // Could be either a date/time, or a digit. - // The options for date/times are: - // YYYY-... => date or date-time - // HH:... => time - // Anything else should be a number. - - lookAhead := l.peekString(5) - if len(lookAhead) < 3 { - return l.lexNumber() - } - - for idx, r := range lookAhead { - if !isDigit(r) { - if idx == 2 && r == ':' { - return l.lexDateTimeOrTime() - } - if idx == 4 && r == '-' { - return l.lexDateTimeOrTime() - } - return l.lexNumber() - } - } - return l.lexNumber() -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - l.brackets = append(l.brackets, '{') - return l.lexVoid -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '{' { - return l.errorf("cannot have '}' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -func (l *tomlLexer) lexDateTimeOrTime() tomlLexStateFn { - // Example matches: - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - // 07:32:00 - // 00:32:00.999999 - - // we already know those two are digits - l.next() - l.next() - - // Got 2 digits. At that point it could be either a time or a date(-time). - - r := l.next() - if r == ':' { - return l.lexTime() - } - - return l.lexDateTime() -} - -func (l *tomlLexer) lexDateTime() tomlLexStateFn { - // This state accepts an offset date-time, a local date-time, or a local date. 
- // - // v--- cursor - // 1979-05-27T07:32:00Z - // 1979-05-27T00:32:00-07:00 - // 1979-05-27T00:32:00.999999-07:00 - // 1979-05-27 07:32:00Z - // 1979-05-27 00:32:00-07:00 - // 1979-05-27 00:32:00.999999-07:00 - // 1979-05-27T07:32:00 - // 1979-05-27T00:32:00.999999 - // 1979-05-27 07:32:00 - // 1979-05-27 00:32:00.999999 - // 1979-05-27 - - // date - - // already checked by lexRvalue - l.next() // digit - l.next() // - - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid month digit in date: %c", r) - } - } - - r := l.next() - if r != '-' { - return l.errorf("expected - to separate month of a date, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid day digit in date: %c", r) - } - } - - l.emit(tokenLocalDate) - - r = l.peek() - - if r == eof { - - return l.lexRvalue - } - - if r != ' ' && r != 'T' { - return l.errorf("incorrect date/time separation character: %c", r) - } - - if r == ' ' { - lookAhead := l.peekString(3)[1:] - if len(lookAhead) < 2 { - return l.lexRvalue - } - for _, r := range lookAhead { - if !isDigit(r) { - return l.lexRvalue - } - } - } - - l.skip() // skip the T or ' ' - - // time - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' { - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - - return l.lexTimeOffset - -} - -func (l *tomlLexer) lexTimeOffset() tomlLexStateFn { - // potential offset - - // Z - // -07:00 - // +07:00 - // nothing - - r := l.peek() - - if r == 'Z' { - l.next() - l.emit(tokenTimeOffset) - } else if r == '+' || r == '-' { - l.next() - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid hour digit in time offset: %c", r) - } - } - - r = l.next() - if r != ':' { - return l.errorf("time offset hour/minute separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time offset: %c", r) - } - } - - l.emit(tokenTimeOffset) - } - - return l.lexRvalue -} - -func (l *tomlLexer) lexTime() tomlLexStateFn { - // v--- cursor - // 07:32:00 - // 00:32:00.999999 - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid minute digit in time: %c", r) - } - } - - r := l.next() - if r != ':' { - return l.errorf("time minute/second separator should be :, not %c", r) - } - - for i := 0; i < 2; i++ { - r := l.next() - if !isDigit(r) { - return l.errorf("invalid second digit in time: %c", r) - } - } - - r = l.peek() - if r == '.' 
{ - l.next() - r := l.next() - if !isDigit(r) { - return l.errorf("expected at least one digit in time's fraction, not %c", r) - } - - for { - r := l.peek() - if !isDigit(r) { - break - } - l.next() - } - } - - l.emit(tokenLocalTime) - return l.lexRvalue - -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - if len(l.brackets) > 0 && l.brackets[len(l.brackets)-1] == '{' { - return l.lexVoid - } - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. -func (l *tomlLexer) lexKey() tomlLexStateFn { - var sb strings.Builder - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("\"") - sb.WriteString(str) - sb.WriteString("\"") - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - sb.WriteString("'") - sb.WriteString(str) - sb.WriteString("'") - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - var str strings.Builder - str.WriteString(" ") - - // skip trailing whitespace - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - // break loop if not a dot - if r != '.' { - break - } - str.WriteString(".") - // skip trailing whitespace after dot - l.next() - for r = l.peek(); isSpace(r); r = l.peek() { - str.WriteRune(r) - l.next() - } - sb.WriteString(str.String()) - continue - } else if r == '.' 
{ - // skip - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - sb.WriteRune(r) - l.next() - } - l.emitWithValue(tokenKey, sb.String()) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - l.brackets = append(l.brackets, '[') - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return sb.String(), nil - } - - next := l.peek() - if next == eof { - break - } - sb.WriteRune(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
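Stepping back before the string-lexing function below: the whole lexer is built on the state-function pattern from the Rob Pike talk cited at the top of the file, with `tomlLexStateFn` as the state type and `tomlLexer.run` as the driver. A stripped-down, self-contained sketch of that pattern (illustrative names, not go-toml's):

```go
package main

import "fmt"

// stateFn mirrors tomlLexStateFn: a state does some work, then returns
// the next state, or nil to halt the machine.
type stateFn func(l *lexer) stateFn

type lexer struct {
	input  string
	pos    int
	tokens []string
}

// lexLetters consumes one character per step and emits it as a "token".
func lexLetters(l *lexer) stateFn {
	if l.pos >= len(l.input) {
		return nil // end of input, analogous to emitting tokenEOF
	}
	l.tokens = append(l.tokens, string(l.input[l.pos]))
	l.pos++
	return lexLetters
}

func main() {
	l := &lexer{input: "toml"}
	// The driver loop has the same shape as tomlLexer.run.
	for state := stateFn(lexLetters); state != nil; {
		state = state(l)
	}
	fmt.Println(l.tokens) // [t o m l]
}
```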
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - var sb strings.Builder - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return sb.String(), nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - sb.WriteString("\"") - l.next() - case 'n': - sb.WriteString("\n") - l.next() - case 'b': - sb.WriteString("\b") - l.next() - case 'f': - sb.WriteString("\f") - l.next() - case '/': - sb.WriteString("/") - l.next() - case 't': - sb.WriteString("\t") - l.next() - case 'r': - sb.WriteString("\r") - l.next() - case '\\': - sb.WriteString("\\") - l.next() - case 'u': - l.next() - var code strings.Builder - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code.String()) - } - sb.WriteRune(rune(intcode)) - case 'U': - l.next() - var code strings.Builder - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code.WriteRune(c) - } - intcode, err := strconv.ParseInt(code.String(), 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code.String()) - } - sb.WriteRune(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && r != '\t' && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - sb.WriteRune(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the 
key till "]" but only bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - if len(l.brackets) == 0 || l.brackets[len(l.brackets)-1] != '[' { - return l.errorf("cannot have ']' here") - } - l.brackets = l.brackets[:len(l.brackets)-1] - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' { - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/localtime.go b/vendor/github.com/pelletier/go-toml/localtime.go deleted file mode 100644 index 9dfe4b9e6c..0000000000 --- a/vendor/github.com/pelletier/go-toml/localtime.go +++ /dev/null @@ -1,287 +0,0 @@ -// Implementation of TOML's local date/time. 
-//
-// Copied over from Google's civil to avoid pulling all the Google dependencies.
-// Originals:
-//   https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go
-// Changes:
-//   * Renamed files from civil* to localtime*.
-//   * Package changed from civil to toml.
-//   * 'Local' prefix added to all structs.
-//
-// Copyright 2016 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// The types below implement civil time, a time-zone-independent
-// representation of time that follows the rules of the proleptic
-// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
-// minutes.
-//
-// Because they lack location information, these types do not represent unique
-// moments or intervals of time. Use time.Time for that purpose.
-package toml
-
-import (
-	"fmt"
-	"time"
-)
-
-// A LocalDate represents a date (year, month, day).
-//
-// This type does not include location information, and therefore does not
-// describe a unique 24-hour timespan.
-type LocalDate struct {
-	Year  int        // Year (e.g., 2014).
-	Month time.Month // Month of the year (January = 1, ...).
-	Day   int        // Day of the month, starting at 1.
-}
-
-// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
-func LocalDateOf(t time.Time) LocalDate {
-	var d LocalDate
-	d.Year, d.Month, d.Day = t.Date()
-	return d
-}
-
-// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
-func ParseLocalDate(s string) (LocalDate, error) {
-	t, err := time.Parse("2006-01-02", s)
-	if err != nil {
-		return LocalDate{}, err
-	}
-	return LocalDateOf(t), nil
-}
-
-// String returns the date in RFC3339 full-date format.
-func (d LocalDate) String() string {
-	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
-}
-
-// IsValid reports whether the date is valid.
-func (d LocalDate) IsValid() bool {
-	return LocalDateOf(d.In(time.UTC)) == d
-}
-
-// In returns the time corresponding to time 00:00:00 of the date in the location.
-//
-// In is always consistent with time.Date, even when time.Date returns a time
-// on a different day. For example, if loc is America/Indiana/Vincennes, then
-// both
-//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
-// and
-//     LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
-// return 23:00:00 on April 30, 1955.
-//
-// In panics if loc is nil.
-func (d LocalDate) In(loc *time.Location) time.Time {
-	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
-}
-
-// AddDays returns the date that is n days in the future.
-// n can also be negative to go into the past.
-func (d LocalDate) AddDays(n int) LocalDate {
-	return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
-}
-
-// DaysSince returns the signed number of days between the date and s, not including the end day.
-// This is the inverse operation to AddDays.
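A small usage sketch of the LocalDate API defined above (the DaysSince method itself continues below); the output comments assume go-toml v1's formatting:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pelletier/go-toml"
)

func main() {
	d, err := toml.ParseLocalDate("1979-05-27")
	if err != nil {
		panic(err)
	}
	fmt.Println(d)              // 1979-05-27
	fmt.Println(d.AddDays(5))   // 1979-06-01
	fmt.Println(d.In(time.UTC)) // 1979-05-27 00:00:00 +0000 UTC
}
```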
-func (d LocalDate) DaysSince(s LocalDate) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 LocalDate) Before(d2 LocalDate) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 LocalDate) After(d2 LocalDate) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d LocalDate) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseLocalDate. -func (d *LocalDate) UnmarshalText(data []byte) error { - var err error - *d, err = ParseLocalDate(string(data)) - return err -} - -// A LocalTime represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type. -type LocalTime struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func LocalTimeOf(t time.Time) LocalTime { - var tm LocalTime - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseLocalTime parses a string and returns the time value it represents. -// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseLocalTime(s string) (LocalTime, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return LocalTime{}, err - } - return LocalTimeOf(t), nil -} - -// String returns the date in the format described in ParseLocalTime. If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t LocalTime) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t LocalTime) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return LocalTimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). 
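Before the method definitions continue below: the text-marshaling methods make these local types usable with any encoding.TextMarshaler-aware package. A round-trip sketch for LocalTime, based on the parsing and formatting rules shown above:

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	t, err := toml.ParseLocalTime("07:32:00.5")
	if err != nil {
		panic(err)
	}
	b, _ := t.MarshalText()
	fmt.Println(string(b)) // 07:32:00.500000000

	var back toml.LocalTime
	if err := back.UnmarshalText(b); err != nil {
		panic(err)
	}
	fmt.Println(back == t) // true
}
```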
-func (t LocalTime) MarshalText() ([]byte, error) {
-	return []byte(t.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The time is expected to be a string in a format accepted by ParseLocalTime.
-func (t *LocalTime) UnmarshalText(data []byte) error {
-	var err error
-	*t, err = ParseLocalTime(string(data))
-	return err
-}
-
-// A LocalDateTime represents a date and time.
-//
-// This type does not include location information, and therefore does not
-// describe a unique moment in time.
-type LocalDateTime struct {
-	Date LocalDate
-	Time LocalTime
-}
-
-// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.
-
-// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
-func LocalDateTimeOf(t time.Time) LocalDateTime {
-	return LocalDateTime{
-		Date: LocalDateOf(t),
-		Time: LocalTimeOf(t),
-	}
-}
-
-// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
-// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
-// the time offset but includes an optional fractional time, as described in
-// ParseLocalTime. Informally, the accepted format is
-//     YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
-// where the 'T' may be a lower-case 't'.
-func ParseLocalDateTime(s string) (LocalDateTime, error) {
-	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
-	if err != nil {
-		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
-		if err != nil {
-			return LocalDateTime{}, err
-		}
-	}
-	return LocalDateTimeOf(t), nil
-}
-
-// String returns the date and time in the format described in ParseLocalDateTime.
-func (dt LocalDateTime) String() string {
-	return dt.Date.String() + "T" + dt.Time.String()
-}
-
-// IsValid reports whether the datetime is valid.
-func (dt LocalDateTime) IsValid() bool {
-	return dt.Date.IsValid() && dt.Time.IsValid()
-}
-
-// In returns the time corresponding to the LocalDateTime in the given location.
-//
-// If the time is missing or ambiguous at the location, In returns the same
-// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
-// both
-//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
-// and
-//     LocalDateTime{
-//         Date: LocalDate{Year: 1955, Month: time.May, Day: 1},
-//         Time: LocalTime{Minute: 30},
-//     }.In(loc)
-// return 23:30:00 on April 30, 1955.
-//
-// In panics if loc is nil.
-func (dt LocalDateTime) In(loc *time.Location) time.Time {
-	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
-}
-
-// Before reports whether dt1 occurs before dt2.
-func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
-	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
-}
-
-// After reports whether dt1 occurs after dt2.
-func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
-	return dt2.Before(dt1)
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The output is the result of dt.String().
-func (dt LocalDateTime) MarshalText() ([]byte, error) {
-	return []byte(dt.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The datetime is expected to be a string in a format accepted by ParseLocalDateTime -func (dt *LocalDateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseLocalDateTime(string(data)) - return err -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index 5712730498..0000000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,1308 +0,0 @@ -package toml - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -const ( - tagFieldName = "toml" - tagFieldComment = "comment" - tagCommented = "commented" - tagMultiline = "multiline" - tagLiteral = "literal" - tagDefault = "default" -) - -type tomlOpts struct { - name string - nameFromTag bool - comment string - commented bool - multiline bool - literal bool - include bool - omitempty bool - defaultValue string -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -type annotation struct { - tag string - comment string - commented string - multiline string - literal string - defaultValue string -} - -var annotationDefault = annotation{ - tag: tagFieldName, - comment: tagFieldComment, - commented: tagCommented, - multiline: tagMultiline, - literal: tagLiteral, - defaultValue: tagDefault, -} - -type MarshalOrder int - -// Orders the Encoder can write the fields to the output stream. -const ( - // Sort fields alphabetically. - OrderAlphabetical MarshalOrder = iota + 1 - // Preserve the order the fields are encountered. For example, the order of fields in - // a struct. - OrderPreserve -) - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(Unmarshaler)).Elem() -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var localDateType = reflect.TypeOf(LocalDate{}) -var localTimeType = reflect.TypeOf(LocalTime{}) -var localDateTimeType = reflect.TypeOf(LocalDateTime{}) -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) - -// Check if the given marshal type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return isTimeType(mtype) - default: - return false - } -} - -func isTimeType(mtype reflect.Type) bool { - return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType -} - -// Check if the given marshal type maps to a Tree slice or array -func isTreeSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTreeSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTree(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a custom marshaler type -func isCustomMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return 
isCustomMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isCustomMarshaler(mtype.Elem()) || isCustomMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a slice or array of a text marshaler type -func isTextMarshalerSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTextMarshalerSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return isTextMarshaler(mtype.Elem()) || isTextMarshaler(reflect.New(mtype.Elem()).Type()) - default: - return false - } -} - -// Check if the given marshal type maps to a non-Tree slice or array -func isOtherSequence(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSequence(mtype.Elem()) - case reflect.Slice, reflect.Array: - return !isTreeSequence(mtype) - default: - return false - } -} - -// Check if the given marshal type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isTree(mtype.Elem()) - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -func isTextMarshaler(mtype reflect.Type) bool { - return mtype.Implements(textMarshalerType) && !isTimeType(mtype) -} - -func callTextMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(encoding.TextMarshaler).MarshalText() -} - -func isCustomUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(unmarshalerType) -} - -func callCustomUnmarshaler(mval reflect.Value, tval interface{}) error { - return mval.Interface().(Unmarshaler).UnmarshalTOML(tval) -} - -func isTextUnmarshaler(mtype reflect.Type) bool { - return mtype.Implements(textUnmarshalerType) -} - -func callTextUnmarshaler(mval reflect.Value, text []byte) error { - return mval.Interface().(encoding.TextUnmarshaler).UnmarshalText(text) -} - -// Marshaler is the interface implemented by types that -// can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -// Unmarshaler is the interface implemented by types that -// can unmarshal a TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). 
-
-Tree structural types and corresponding marshal types:
-
-  *Tree                            (*)struct, (*)map[string]interface{}
-  []*Tree                          (*)[](*)struct, (*)[](*)map[string]interface{}
-  []interface{} (as interface{})   (*)[]primitive, (*)[]([]interface{})
-  interface{}                      (*)primitive
-
-Tree primitive types and corresponding marshal types:
-
-  uint64          uint, uint8-uint64, pointers to same
-  int64           int, int8-int64, pointers to same
-  float64         float32, float64, pointers to same
-  string          string, pointers to same
-  bool            bool, pointers to same
-  toml.LocalTime  toml.LocalTime{}, pointers to same
-
-For additional flexibility, use the Encoder API.
-*/
-func Marshal(v interface{}) ([]byte, error) {
-	return NewEncoder(nil).marshal(v)
-}
-
-// Encoder writes TOML values to an output stream.
-type Encoder struct {
-	w io.Writer
-	encOpts
-	annotation
-	line            int
-	col             int
-	order           MarshalOrder
-	promoteAnon     bool
-	compactComments bool
-	indentation     string
-}
-
-// NewEncoder returns a new encoder that writes to w.
-func NewEncoder(w io.Writer) *Encoder {
-	return &Encoder{
-		w:           w,
-		encOpts:     encOptsDefaults,
-		annotation:  annotationDefault,
-		line:        0,
-		col:         1,
-		order:       OrderAlphabetical,
-		indentation: "  ",
-	}
-}
-
-// Encode writes the TOML encoding of v to the stream.
-//
-// See the documentation for Marshal for details.
-func (e *Encoder) Encode(v interface{}) error {
-	b, err := e.marshal(v)
-	if err != nil {
-		return err
-	}
-	if _, err := e.w.Write(b); err != nil {
-		return err
-	}
-	return nil
-}
-
-// QuoteMapKeys sets up the encoder to encode maps with string-type keys
-// using quoted TOML keys.
-//
-// This relieves the character limitations on map keys.
-func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
-	e.quoteMapKeys = v
-	return e
-}
-
-// ArraysWithOneElementPerLine sets up the encoder to encode arrays
-// with more than one element on multiple lines instead of one.
-//
-// For example:
-//
-//   A = [1,2,3]
-//
-// Becomes
-//
-//   A = [
-//     1,
-//     2,
-//     3,
-//   ]
-func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder {
-	e.arraysOneElementPerLine = v
-	return e
-}
-
-// Order sets the order in which fields are written to the output stream.
-func (e *Encoder) Order(ord MarshalOrder) *Encoder {
-	e.order = ord
-	return e
-}
-
-// Indentation sets the indentation string used when marshaling.
-func (e *Encoder) Indentation(indent string) *Encoder {
-	e.indentation = indent
-	return e
-}
-
-// SetTagName changes the default tag "toml".
-func (e *Encoder) SetTagName(v string) *Encoder {
-	e.tag = v
-	return e
-}
-
-// SetTagComment changes the default tag "comment".
-func (e *Encoder) SetTagComment(v string) *Encoder {
-	e.comment = v
-	return e
-}
-
-// SetTagCommented changes the default tag "commented".
-func (e *Encoder) SetTagCommented(v string) *Encoder {
-	e.commented = v
-	return e
-}
-
-// SetTagMultiline changes the default tag "multiline".
-func (e *Encoder) SetTagMultiline(v string) *Encoder {
-	e.multiline = v
-	return e
-}
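The struct annotations documented in the Marshal comment above are easiest to see end-to-end. A minimal sketch (field names and values are illustrative, and the exact output formatting may differ slightly between versions):

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

type Config struct {
	Host string `toml:"host" comment:"server address"`
	Port int    `toml:"port"`
	Note string `toml:"note,omitempty"` // omitted while empty
}

func main() {
	b, err := toml.Marshal(Config{Host: "localhost", Port: 5432})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
	// Roughly:
	//   # server address
	//   host = "localhost"
	//   port = 5432
}
```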
-
-// PromoteAnonymous changes how anonymous struct fields are marshaled.
-// Usually, they are marshaled as if the inner exported fields were fields in
-// the outer struct. However, if an anonymous struct field is given a name in
-// its TOML tag, it is treated like a regular struct field with that name,
-// rather than being anonymous.
-//
-// When anonymous promotion is enabled, all anonymous structs are promoted and
-// treated like regular struct fields.
-func (e *Encoder) PromoteAnonymous(promote bool) *Encoder {
-	e.promoteAnon = promote
-	return e
-}
-
-// CompactComments removes the newline before each comment in the tree.
-func (e *Encoder) CompactComments(cc bool) *Encoder {
-	e.compactComments = cc
-	return e
-}
-
-func (e *Encoder) marshal(v interface{}) ([]byte, error) {
-	// Check if the indentation is valid.
-	for _, char := range e.indentation {
-		if !isSpace(char) {
-			return []byte{}, fmt.Errorf("invalid indentation: must only contain space or tab characters")
-		}
-	}
-
-	mtype := reflect.TypeOf(v)
-	if mtype == nil {
-		return []byte{}, errors.New("nil cannot be marshaled to TOML")
-	}
-
-	switch mtype.Kind() {
-	case reflect.Struct, reflect.Map:
-	case reflect.Ptr:
-		if mtype.Elem().Kind() != reflect.Struct {
-			return []byte{}, errors.New("only a pointer to a struct can be marshaled to TOML")
-		}
-		if reflect.ValueOf(v).IsNil() {
-			return []byte{}, errors.New("nil pointer cannot be marshaled to TOML")
-		}
-	default:
-		return []byte{}, errors.New("only a struct or map can be marshaled to TOML")
-	}
-
-	sval := reflect.ValueOf(v)
-	if isCustomMarshaler(mtype) {
-		return callCustomMarshaler(sval)
-	}
-	if isTextMarshaler(mtype) {
-		return callTextMarshaler(sval)
-	}
-	t, err := e.valueToTree(mtype, sval)
-	if err != nil {
-		return []byte{}, err
-	}
-
-	var buf bytes.Buffer
-	_, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, e.indentation, e.compactComments, false)
-
-	return buf.Bytes(), err
-}
-
-// Create the next tree with a position based on Encoder.line.
-func (e *Encoder) nextTree() *Tree {
-	return newTreeWithPosition(Position{Line: e.line, Col: 1})
-}
-
-// Convert the given marshal struct or map value to a TOML tree.
-func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
-	if mtype.Kind() == reflect.Ptr {
-		return e.valueToTree(mtype.Elem(), mval.Elem())
-	}
-	tval := e.nextTree()
-	switch mtype.Kind() {
-	case reflect.Struct:
-		switch mval.Interface().(type) {
-		case Tree:
-			reflect.ValueOf(tval).Elem().Set(mval)
-		default:
-			for i := 0; i < mtype.NumField(); i++ {
-				mtypef, mvalf := mtype.Field(i), mval.Field(i)
-				opts := tomlOptions(mtypef, e.annotation)
-				if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
-					val, err := e.valueToToml(mtypef.Type, mvalf)
-					if err != nil {
-						return nil, err
-					}
-					if tree, ok := val.(*Tree); ok && mtypef.Anonymous && !opts.nameFromTag && !e.promoteAnon {
-						e.appendTree(tval, tree)
-					} else {
-						val = e.wrapTomlValue(val, tval)
-						tval.SetPathWithOptions([]string{opts.name}, SetOptions{
-							Comment:   opts.comment,
-							Commented: opts.commented,
-							Multiline: opts.multiline,
-							Literal:   opts.literal,
-						}, val)
-					}
-				}
-			}
-		}
-	case reflect.Map:
-		keys := mval.MapKeys()
-		if e.order == OrderPreserve && len(keys) > 0 {
-			// Sorting []reflect.Value is not straightforward.
-			//
-			// OrderPreserve only gives deterministic results when strings are
-			// used as map keys.
- typ := keys[0].Type() - kind := keys[0].Kind() - if kind == reflect.String { - ikeys := make([]string, len(keys)) - for i := range keys { - ikeys[i] = keys[i].Interface().(string) - } - sort.Strings(ikeys) - for i := range ikeys { - keys[i] = reflect.ValueOf(ikeys[i]).Convert(typ) - } - } - } - for _, key := range keys { - mvalf := mval.MapIndex(key) - if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() { - continue - } - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - val = e.wrapTomlValue(val, tval) - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.order, e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.SetPath([]string{key.String()}, val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - default: - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - } - if mtype.Kind() == reflect.Interface { - return e.valueToToml(mval.Elem().Type(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTextMarshaler(mtype): - b, err := callTextMarshaler(mval) - return string(b), err - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isOtherSequence(mtype), isCustomMarshalerSequence(mtype), isTextMarshalerSequence(mtype): - return e.valueToOtherSlice(mtype, mval) - case isTreeSequence(mtype): - return e.valueToTreeSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) { - return fmt.Sprint(mval), nil - } - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return mval.Interface(), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -func (e *Encoder) appendTree(t, o *Tree) error { - for key, value := range o.values { - if _, ok := t.values[key]; ok { - continue - } - if tomlValue, ok := value.(*tomlValue); ok { - tomlValue.position.Col = t.position.Col - } - t.values[key] = value - } 
- return nil -} - -// Create a toml value with the current line number as the position line -func (e *Encoder) wrapTomlValue(val interface{}, parent *Tree) interface{} { - _, isTree := val.(*Tree) - _, isTreeS := val.([]*Tree) - if isTree || isTreeS { - e.line++ - return val - } - - ret := &tomlValue{ - value: val, - position: Position{ - e.line, - parent.position.Col, - }, - } - e.line++ - return ret -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t, tagName: tagFieldName} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// default:"foo" Provides a default value. -// -// For default values, only fields of the following types are supported: -// * string -// * bool -// * int -// * int64 -// * float64 -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts - tagName string - strict bool - visitor visitorState -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - tagName: tagFieldName, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. -func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -// SetTagName allows changing default tag "toml" -func (d *Decoder) SetTagName(v string) *Decoder { - d.tagName = v - return d -} - -// Strict allows changing to strict decoding. Any fields that are found in the -// input data and do not have a corresponding struct member cause an error. 
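// The doc comments above spell out the contract this vendored copy provided:
// `toml:"..."` renames a field, `default:"..."` fills a missing key (the
// time.Duration form of a default is handled further down in this same
// deleted file), and a Decoder with Strict(true) rejects keys that have no
// destination field. An illustrative sketch; struct and key names are invented:
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/pelletier/go-toml"
)

type Server struct {
	Host    string        `toml:"host"`
	Port    int64         `toml:"port" default:"8080"`
	Timeout time.Duration `toml:"timeout" default:"30s"`
}

func main() {
	doc := "host = \"localhost\"\nunknown_key = true\n"

	var s Server
	if err := toml.Unmarshal([]byte(doc), &s); err != nil {
		panic(err) // non-strict: unknown_key is silently ignored
	}
	fmt.Println(s.Host, s.Port, s.Timeout) // localhost 8080 30s

	err := toml.NewDecoder(strings.NewReader(doc)).Strict(true).Decode(&s)
	fmt.Println(err) // undecoded keys: ["unknown_key"]
}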
-func (d *Decoder) Strict(strict bool) *Decoder { - d.strict = strict - return d -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype == nil { - return errors.New("nil cannot be unmarshaled from TOML") - } - if mtype.Kind() != reflect.Ptr { - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - elem := mtype.Elem() - - switch elem.Kind() { - case reflect.Struct, reflect.Map: - case reflect.Interface: - elem = mapStringInterfaceType - default: - return errors.New("only a pointer to struct or map can be unmarshaled from TOML") - } - - if reflect.ValueOf(v).IsNil() { - return errors.New("nil pointer cannot be unmarshaled from TOML") - } - - vv := reflect.ValueOf(v).Elem() - - if d.strict { - d.visitor = newVisitorState(d.tval) - } - - sval, err := d.valueFromTree(elem, d.tval, &vv) - if err != nil { - return err - } - if err := d.visitor.validate(); err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type. When mval1 -// is non-nil, merge fields into the given value instead of allocating a new one. -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - // Check if pointer to value implements the Unmarshaler interface. - if mvalPtr := reflect.New(mtype); isCustomUnmarshaler(mvalPtr.Type()) { - d.visitor.visitAll() - - if tval == nil { - return mvalPtr.Elem(), nil - } - - if err := callCustomUnmarshaler(mvalPtr, tval.ToMap()); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - if mval1 != nil { - mval = *mval1 - } else { - mval = reflect.New(mtype).Elem() - } - - switch mval.Interface().(type) { - case Tree: - mval.Set(reflect.ValueOf(tval).Elem()) - default: - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - an := annotation{tag: d.tagName} - opts := tomlOptions(mtypef, an) - if !opts.include { - continue - } - baseKey := opts.name - keysToTry := []string{ - baseKey, - strings.ToLower(baseKey), - strings.ToTitle(baseKey), - strings.ToLower(string(baseKey[0])) + baseKey[1:], - } - - found := false - if tval != nil { - for _, key := range keysToTry { - exists := tval.HasPath([]string{key}) - if !exists { - continue - } - - d.visitor.push(key) - val := tval.GetPath([]string{key}) - fval := mval.Field(i) - mvalf, err := d.valueFromToml(mtypef.Type, val, &fval) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.Field(i).Set(mvalf) - found = true - d.visitor.pop() - break - } - } - - if !found && opts.defaultValue != "" { - mvalf := mval.Field(i) - var val interface{} - var err error - switch mvalf.Kind() { - case reflect.String: - val = opts.defaultValue - case reflect.Bool: - val, err = strconv.ParseBool(opts.defaultValue) - case reflect.Uint: - val, err = strconv.ParseUint(opts.defaultValue, 10, 0) - case reflect.Uint8: - val, err = strconv.ParseUint(opts.defaultValue, 10, 8) - case reflect.Uint16: - val, err = strconv.ParseUint(opts.defaultValue, 10, 16) - case reflect.Uint32: - val, err = strconv.ParseUint(opts.defaultValue, 10, 32) - case reflect.Uint64: - val, err = strconv.ParseUint(opts.defaultValue, 10, 64) - case reflect.Int: - val, err = 
strconv.ParseInt(opts.defaultValue, 10, 0) - case reflect.Int8: - val, err = strconv.ParseInt(opts.defaultValue, 10, 8) - case reflect.Int16: - val, err = strconv.ParseInt(opts.defaultValue, 10, 16) - case reflect.Int32: - val, err = strconv.ParseInt(opts.defaultValue, 10, 32) - case reflect.Int64: - // Check if the provided number has a non-numeric extension. - var hasExtension bool - if len(opts.defaultValue) > 0 { - lastChar := opts.defaultValue[len(opts.defaultValue)-1] - if lastChar < '0' || lastChar > '9' { - hasExtension = true - } - } - // If the value is a time.Duration with extension, parse as duration. - // If the value is an int64 or a time.Duration without extension, parse as number. - if hasExtension && mvalf.Type().String() == "time.Duration" { - val, err = time.ParseDuration(opts.defaultValue) - } else { - val, err = strconv.ParseInt(opts.defaultValue, 10, 64) - } - case reflect.Float32: - val, err = strconv.ParseFloat(opts.defaultValue, 32) - case reflect.Float64: - val, err = strconv.ParseFloat(opts.defaultValue, 64) - default: - return mvalf, fmt.Errorf("unsupported field type for default option") - } - - if err != nil { - return mvalf, err - } - mvalf.Set(reflect.ValueOf(val).Convert(mvalf.Type())) - } - - // save the old behavior above and try to check structs - if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct { - tmpTval := tval - if !mtypef.Anonymous { - tmpTval = nil - } - fval := mval.Field(i) - v, err := d.valueFromTree(mtypef.Type, tmpTval, &fval) - if err != nil { - return v, err - } - mval.Field(i).Set(v) - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - d.visitor.push(key) - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val, nil) - if err != nil { - return mval, formatError(err, tval.GetPositionPath([]string{key})) - } - mval.SetMapIndex(reflect.ValueOf(key).Convert(mtype.Key()), mvalf) - d.visitor.pop() - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - d.visitor.push(strconv.Itoa(i)) - val, err := d.valueFromTree(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - d.visitor.pop() - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval, err := makeSliceOrArray(mtype, len(tval)) - if err != nil { - return mval, err - } - - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i], nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSliceI(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val := reflect.ValueOf(tval) - length := val.Len() - - mval, err := makeSliceOrArray(mtype, length) - if err != nil { - return mval, err - } - - for i := 0; i < length; i++ { - val, err := d.valueFromToml(mtype.Elem(), val.Index(i).Interface(), nil) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Create a new slice or a new array 
with specified length -func makeSliceOrArray(mtype reflect.Type, tLength int) (reflect.Value, error) { - var mval reflect.Value - switch mtype.Kind() { - case reflect.Slice: - mval = reflect.MakeSlice(mtype, tLength, tLength) - case reflect.Array: - mval = reflect.New(reflect.ArrayOf(mtype.Len(), mtype.Elem())).Elem() - if tLength > mtype.Len() { - return mval, fmt.Errorf("unmarshal: TOML array length (%v) exceeds destination array length (%v)", tLength, mtype.Len()) - } - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type. When mval1 is non-nil -// and the given type is a struct value, merge fields into it. -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval, mval1) - } - - switch t := tval.(type) { - case *Tree: - var mval11 *reflect.Value - if mtype.Kind() == reflect.Struct { - mval11 = mval1 - } - - if isTree(mtype) { - return d.valueFromTree(mtype, t, mval11) - } - - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil) - } else { - return d.valueFromToml(mval1.Elem().Type(), t, nil) - } - } - - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSequence(mtype) { - return d.valueFromTreeSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - d.visitor.visit() - if isOtherSequence(mtype) { - return d.valueFromOtherSlice(mtype, t) - } - if mtype.Kind() == reflect.Interface { - if mval1 == nil || mval1.IsNil() { - return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t) - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - d.visitor.visit() - mvalPtr := reflect.New(mtype) - - // Check if pointer to value implements the Unmarshaler interface. - if isCustomUnmarshaler(mvalPtr.Type()) { - if err := callCustomUnmarshaler(mvalPtr, tval); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal toml: %v", err) - } - return mvalPtr.Elem(), nil - } - - // Check if pointer to value implements the encoding.TextUnmarshaler. 
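// makeSliceOrArray above is why fixed-size Go arrays work as decode targets:
// a shorter TOML array leaves the tail zeroed, a longer one is an error. A
// small sketch; the Ports field is hypothetical:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	var fits struct{ Ports [3]int64 }
	fmt.Println(toml.Unmarshal([]byte("Ports = [8080, 8081]"), &fits)) // <nil>; Ports[2] stays 0

	var overflow struct{ Ports [1]int64 }
	// Expect an error along the lines of:
	//   TOML array length (2) exceeds destination array length (1)
	fmt.Println(toml.Unmarshal([]byte("Ports = [8080, 8081]"), &overflow))
}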
- if isTextUnmarshaler(mvalPtr.Type()) && !isTimeType(mtype) { - if err := d.unmarshalText(tval, mvalPtr); err != nil { - return reflect.ValueOf(nil), fmt.Errorf("unmarshal text: %v", err) - } - return mvalPtr.Elem(), nil - } - - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - - switch val.Type() { - case localDateType: - localDate := val.Interface().(LocalDate) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil - } - case localDateTimeType: - localDateTime := val.Interface().(LocalDateTime) - switch mtype { - case timeType: - return reflect.ValueOf(time.Date( - localDateTime.Date.Year, - localDateTime.Date.Month, - localDateTime.Date.Day, - localDateTime.Time.Hour, - localDateTime.Time.Minute, - localDateTime.Time.Second, - localDateTime.Time.Nanosecond, - time.Local)), nil - } - } - - // if this passes for when mtype is reflect.Struct, tval is a time.LocalTime - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if mtype.Kind() == reflect.Int64 && mtype == reflect.TypeOf(time.Duration(1)) && val.Kind() == reflect.String { - d, err := time.ParseDuration(val.String()) - if err != nil { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v. 
%s", tval, tval, mtype.String(), err) - } - return reflect.ValueOf(d), nil - } - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Convert(reflect.TypeOf(int64(0))).Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Float64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Convert(reflect.TypeOf(float64(0))).Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Interface: - if mval1 == nil || mval1.IsNil() { - return reflect.ValueOf(tval), nil - } else { - ival := mval1.Elem() - return d.valueFromToml(mval1.Elem().Type(), t, &ival) - } - case reflect.Slice, reflect.Array: - if isOtherSequence(mtype) && isOtherSequence(reflect.TypeOf(t)) { - return d.valueFromOtherSliceI(mtype, t) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) { - var melem *reflect.Value - - if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) { - elem := mval1.Elem() - melem = &elem - } - - val, err := d.valueFromToml(mtype.Elem(), tval, melem) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func (d *Decoder) unmarshalText(tval interface{}, mval reflect.Value) error { - var buf bytes.Buffer - fmt.Fprint(&buf, tval) - return callTextUnmarshaler(mval, buf.Bytes()) -} - -func tomlOptions(vf reflect.StructField, an annotation) tomlOpts { - tag := vf.Tag.Get(an.tag) - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get(an.comment); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get(an.commented)) - multiline, _ := strconv.ParseBool(vf.Tag.Get(an.multiline)) - literal, _ := strconv.ParseBool(vf.Tag.Get(an.literal)) - defaultValue := vf.Tag.Get(tagDefault) - result := tomlOpts{ - name: vf.Name, - nameFromTag: false, - comment: comment, - 
commented: commented, - multiline: multiline, - literal: literal, - include: true, - omitempty: false, - defaultValue: defaultValue, - } - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - result.nameFromTag = true - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Slice, reflect.Array, reflect.Map: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} - -// visitorState keeps track of which keys were unmarshaled. -type visitorState struct { - tree *Tree - path []string - keys map[string]struct{} - active bool -} - -func newVisitorState(tree *Tree) visitorState { - path, result := []string{}, map[string]struct{}{} - insertKeys(path, result, tree) - return visitorState{ - tree: tree, - path: path[:0], - keys: result, - active: true, - } -} - -func (s *visitorState) push(key string) { - if s.active { - s.path = append(s.path, key) - } -} - -func (s *visitorState) pop() { - if s.active { - s.path = s.path[:len(s.path)-1] - } -} - -func (s *visitorState) visit() { - if s.active { - delete(s.keys, strings.Join(s.path, ".")) - } -} - -func (s *visitorState) visitAll() { - if s.active { - for k := range s.keys { - if strings.HasPrefix(k, strings.Join(s.path, ".")) { - delete(s.keys, k) - } - } - } -} - -func (s *visitorState) validate() error { - if !s.active { - return nil - } - undecoded := make([]string, 0, len(s.keys)) - for key := range s.keys { - undecoded = append(undecoded, key) - } - sort.Strings(undecoded) - if len(undecoded) > 0 { - return fmt.Errorf("undecoded keys: %q", undecoded) - } - return nil -} - -func insertKeys(path []string, m map[string]struct{}, tree *Tree) { - for k, v := range tree.values { - switch node := v.(type) { - case []*Tree: - for i, item := range node { - insertKeys(append(path, k, strconv.Itoa(i)), m, item) - } - case *Tree: - insertKeys(append(path, k), m, node) - case *tomlValue: - m[strings.Join(append(path, k), ".")] = struct{}{} - } - } -} diff --git a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml b/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml deleted file mode 100644 index 792b72ed72..0000000000 --- a/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic_lists] - floats = [12.3,45.6,78.9] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - ints = [8001,8001,8002] - uints = [5002,5003] - strings = ["One","Two","Three"] - -[[subdocptrs]] - name = "Second" - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.second] - name = "Second" - - [subdoc.first] - name = "First" - -[basic] - uint = 5001 - bool = true - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - date = 1979-05-27T07:32:00Z - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml 
b/vendor/github.com/pelletier/go-toml/marshal_test.toml deleted file mode 100644 index ba5e110bf0..0000000000 --- a/vendor/github.com/pelletier/go-toml/marshal_test.toml +++ /dev/null @@ -1,39 +0,0 @@ -title = "TOML Marshal Testing" - -[basic] - bool = true - date = 1979-05-27T07:32:00Z - float = 123.4 - float64 = 123.456782132399 - int = 5000 - string = "Bite me" - uint = 5001 - -[basic_lists] - bools = [true,false,true] - dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z] - floats = [12.3,45.6,78.9] - ints = [8001,8001,8002] - strings = ["One","Two","Three"] - uints = [5002,5003] - -[basic_map] - one = "one" - two = "two" - -[subdoc] - - [subdoc.first] - name = "First" - - [subdoc.second] - name = "Second" - -[[subdoclist]] - name = "List.First" - -[[subdoclist]] - name = "List.Second" - -[[subdocptrs]] - name = "Second" diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index b3726d0dd8..0000000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,507 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - case tokenError: - p.raiseError(tok, "parsing error: %s", tok.String()) - default: - p.raiseError(tok, "unexpected token %s", tok.typ) - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - 
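// raiseError above panics with the token's "(line, column)" prefix; the
// deleted toml.go later in this diff recovers that panic in LoadBytes and
// hands it back as an ordinary error, which is what callers of Load observe:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	_, err := toml.Load("a =") // no value after the equals sign
	// The message carries the 1-indexed position, roughly:
	//   (1, 4): expecting a value
	fmt.Println(err)
}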
p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) - } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - destTree := p.tree.GetPath(keys) - if target, ok := destTree.(*Tree); ok && target != nil && target.inline { - p.raiseError(key, "could not re-define exist inline table or its sub-table : %s", - strings.Join(keys, ".")) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err.Error()) - } - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - prefixKey := parsedKey[0 : len(parsedKey)-1] - tableKey = append(tableKey, prefixKey...) 
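// parseGroupArray grows a []*Tree each time the same [[header]] reappears,
// while parseGroup above rejects a repeated [header] outright. An
// illustrative round-trip with made-up key names:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	doc := `
[[upstream]]
name = "a"

[[upstream]]
name = "b"
`
	tree, err := toml.Load(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(tree.Get("upstream").([]*toml.Tree))) // 2

	_, err = toml.Load("[dup]\n[dup]")
	fmt.Println(err) // expect a "duplicated tables" error
}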
- - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - case nil: - // create intermediate - if err := p.tree.createSubTree(tableKey, key.Position); err != nil { - p.raiseError(key, "could not create intermediate group: %s", err) - } - targetNode = p.tree.GetPath(tableKey).(*Tree) - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - if targetNode.inline { - p.raiseError(key, "could not add key or sub-table to exist inline table or its sub-table : %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVal := parsedKey[len(parsedKey)-1] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var errInvalidUnderscore = errors.New("invalid use of _ in number") - -func numberContainsInvalidUnderscore(value string) error { - // For large numbers, you may use underscores between digits to enhance - // readability. Each underscore must be surrounded by at least one digit on - // each side. - - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscore - } - } - hasBefore = isDigit(r) - } - return nil -} - -var errInvalidUnderscoreHex = errors.New("invalid use of _ in hex number") - -func hexNumberContainsInvalidUnderscore(value string) error { - hasBefore := false - for idx, r := range value { - if r == '_' { - if !hasBefore || idx+1 >= len(value) { - // can't end with an underscore - return errInvalidUnderscoreHex - } - } - hasBefore = isHexDigit(r) - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - base := 10 - s := cleanedVal - checkInvalidUnderscore := numberContainsInvalidUnderscore - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - checkInvalidUnderscore = hexNumberContainsInvalidUnderscore - base = 16 - case 'o': - base = 8 - case 'b': - base = 2 - default: - panic("invalid base") // the lexer should catch this first - } - s = cleanedVal[2:] - } - - err := checkInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - - var val interface{} - val, err = strconv.ParseInt(s, base, 64) - if err == nil { - return val - } - - if s[0] != '-' { - if val, err = strconv.ParseUint(s, base, 64); err == nil { - return val - } - } - p.raiseError(tok, "%s", err) - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != 
nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalTime: - val, err := ParseLocalTime(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLocalDate: - // a local date may be followed by: - // * nothing: this is a local date - // * a local time: this is a local date-time - - next := p.peek() - if next == nil || next.typ != tokenLocalTime { - val, err := ParseLocalDate(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - localDate := tok - localTime := p.getToken() - - next = p.peek() - if next == nil || next.typ != tokenTimeOffset { - v := localDate.val + "T" + localTime.val - val, err := ParseLocalDateTime(v) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - } - - offset := p.getToken() - - layout := time.RFC3339Nano - v := localDate.val + "T" + localTime.val + offset.val - val, err := time.ParseInLocation(layout, v, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - default: - panic(fmt.Errorf("unhandled token: %v", tok)) - } - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey, tokenInteger, tokenString: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - - parsedKey, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid key: %s", err) - } - - value := p.parseRvalue() - tree.SetPath(parsedKey, value) - case tokenComma: - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - tree.inline = true - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(newTree()) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if reflect.TypeOf(val) != arrayType { - arrayType = nil - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - - // if the array is a mixed-type array or its length is 0, - // don't convert it to a table array - if len(array) <= 0 { - arrayType = nil - } - // An array of Trees is actually an 
array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. - if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87ba..0000000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index b437fdd3b6..0000000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,136 +0,0 @@ -package toml - -import "fmt" - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenLocalDate - tokenLocalTime - tokenTimeOffset - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "LocalDate", - "LocalTime", - "TimeOffset", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || r == '_' -} - -func isKeyChar(r rune) bool { - // Keys start with the first character that isn't 
whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return '0' <= r && r <= '9' -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 5541b941f8..0000000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,533 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - literal bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - inline bool - position Position -} - -func newTree() *Tree { - return newTreeWithPosition(Position{}) -} - -func newTreeWithPosition(pos Position) *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: pos, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. 
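// As the Get doc comment above warns, the dot-separated form cannot address
// keys that themselves contain dots; GetPath takes the already-split path.
// A quick illustration with an invented quoted key:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("[servers]\n\"10.0.0.1\" = \"primary\"")
	if err != nil {
		panic(err)
	}
	fmt.Println(tree.Get("servers.10.0.0.1"))                  // <nil>: split at every dot
	fmt.Println(tree.GetPath([]string{"servers", "10.0.0.1"})) // primary
}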
-func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetArray returns the value at key in the Tree. -// It returns []string, []int64, etc type if key has homogeneous lists -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArray(key string) interface{} { - if key == "" { - return t - } - return t.GetArrayPath(strings.Split(key, ".")) -} - -// GetArrayPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetArrayPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - switch n := node.value.(type) { - case []interface{}: - return getArray(n) - default: - return node.value - } - default: - return node - } -} - -// if homogeneous array, then return slice type object over []interface{} -func getArray(n []interface{}) interface{} { - var s []string - var i64 []int64 - var f64 []float64 - var bl []bool - for _, value := range n { - switch v := value.(type) { - case string: - s = append(s, v) - case int64: - i64 = append(i64, v) - case float64: - f64 = append(f64, v) - case bool: - bl = append(bl, v) - default: - return n - } - } - if len(s) == len(n) { - return s - } else if len(i64) == len(n) { - return i64 - } else if len(f64) == len(n) { - return f64 - } else if len(bl) == len(n) { - return bl - } - return n -} - -// GetPosition returns the position of the given key. -func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// SetPositionPath sets the position of element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree position is set. 
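// getArray above is the piece that lets GetArray hand back a typed slice for
// homogeneous TOML arrays, where plain Get returns []interface{}. Sketch:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load(`hosts = ["a", "b"]`)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", tree.Get("hosts"))      // []interface {}
	fmt.Printf("%T\n", tree.GetArray("hosts")) // []string
}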
-func (t *Tree) SetPositionPath(keys []string, pos Position) { - if len(keys) == 0 { - t.position = pos - return - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - subtree = node[len(node)-1] - default: - return - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - node.position = pos - return - case *Tree: - node.position = pos - return - case []*Tree: - // go to most recent element - if len(node) == 0 { - return - } - node[len(node)-1].position = pos - return - } -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool - Literal bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). 
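// SetOptions above is how per-key formatting survives into Marshal. A
// minimal sketch that attaches a comment to a programmatically set key;
// the key name and comment text are invented:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.TreeFromMap(map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	tree.SetWithOptions("log_level", toml.SetOptions{
		Comment: "one of panic|fatal|error|info|debug",
	}, "info")
	out, _ := tree.Marshal()
	// Expect roughly:
	//   # one of panic|fatal|error|info|debug
	//   log_level = "info"
	fmt.Println(string(out))
}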
-func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for i, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - node = append(node, newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col})) - subtree.values[intermediateKey] = node - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch v := value.(type) { - case *Tree: - v.comment = opts.Comment - v.commented = opts.Commented - toInsert = value - case []*Tree: - for i := range v { - v[i].commented = opts.Commented - } - toInsert = value - case *tomlValue: - v.comment = opts.Comment - v.commented = opts.Commented - v.multiline = opts.Multiline - v.literal = opts.Literal - toInsert = v - default: - toInsert = &tomlValue{value: value, - comment: opts.Comment, - commented: opts.Commented, - multiline: opts.Multiline, - literal: opts.Literal, - position: Position{Line: subtree.position.Line + len(subtree.values) + 1, Col: subtree.position.Col}} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - t.SetPathWithOptions(keys, SetOptions{Comment: comment, Commented: commented}, value) -} - -// Delete removes a key from the tree. -// Key is a dot-separated path (e.g. a.b.c). -func (t *Tree) Delete(key string) error { - keys, err := parseKey(key) - if err != nil { - return err - } - return t.DeletePath(keys) -} - -// DeletePath removes a key from the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -func (t *Tree) DeletePath(keys []string) error { - keyLen := len(keys) - if keyLen == 1 { - delete(t.values, keys[0]) - return nil - } - tree := t.GetPath(keys[:keyLen-1]) - item := keys[keyLen-1] - switch node := tree.(type) { - case *Tree: - delete(node.values, item) - return nil - } - return errors.New("no such key to delete") -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for i, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTreeWithPosition(Position{Line: t.position.Line + i, Col: t.position.Col}) - tree.position = pos - tree.inline = subtree.inline - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = fmt.Errorf("%s", r) - } - }() - - if len(b) >= 4 && (hasUTF32BigEndianBOM4(b) || hasUTF32LittleEndianBOM4(b)) { - b = b[4:] - } else if len(b) >= 3 && hasUTF8BOM3(b) { - b = b[3:] - } else if len(b) >= 2 && (hasUTF16BigEndianBOM2(b) || hasUTF16LittleEndianBOM2(b)) { - b = b[2:] - } - - tree = parseToml(lexToml(b)) - return -} - -func hasUTF16BigEndianBOM2(b []byte) bool { - return b[0] == 0xFE && b[1] == 0xFF -} - -func hasUTF16LittleEndianBOM2(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE -} - -func hasUTF8BOM3(b []byte) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -func hasUTF32BigEndianBOM4(b []byte) bool { - return b[0] == 0x00 && b[1] == 0x00 && b[2] == 0xFE && b[3] == 0xFF -} - -func hasUTF32LittleEndianBOM4(b []byte) bool { - return b[0] == 0xFF && b[1] == 0xFE && b[2] == 0x00 && b[3] == 0x00 -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomlpub.go b/vendor/github.com/pelletier/go-toml/tomlpub.go deleted file mode 100644 index 4136b46254..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomlpub.go +++ /dev/null @@ -1,71 +0,0 @@ -package toml - -// PubTOMLValue wrapping tomlValue in order to access all properties from outside. 
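// LoadBytes above strips any UTF-8/16/32 BOM and converts parser panics into
// errors, so LoadFile composes cleanly with Tree.Unmarshal. The file path and
// config shape below are hypothetical:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

type Config struct {
	Log struct {
		Level string `toml:"level" default:"info"`
	} `toml:"log"`
}

func main() {
	tree, err := toml.LoadFile("config.toml")
	if err != nil {
		fmt.Println("load:", err)
		return
	}
	var cfg Config
	if err := tree.Unmarshal(&cfg); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Println(cfg.Log.Level)
}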
-type PubTOMLValue = tomlValue - -func (ptv *PubTOMLValue) Value() interface{} { - return ptv.value -} -func (ptv *PubTOMLValue) Comment() string { - return ptv.comment -} -func (ptv *PubTOMLValue) Commented() bool { - return ptv.commented -} -func (ptv *PubTOMLValue) Multiline() bool { - return ptv.multiline -} -func (ptv *PubTOMLValue) Position() Position { - return ptv.position -} - -func (ptv *PubTOMLValue) SetValue(v interface{}) { - ptv.value = v -} -func (ptv *PubTOMLValue) SetComment(s string) { - ptv.comment = s -} -func (ptv *PubTOMLValue) SetCommented(c bool) { - ptv.commented = c -} -func (ptv *PubTOMLValue) SetMultiline(m bool) { - ptv.multiline = m -} -func (ptv *PubTOMLValue) SetPosition(p Position) { - ptv.position = p -} - -// PubTree wrapping Tree in order to access all properties from outside. -type PubTree = Tree - -func (pt *PubTree) Values() map[string]interface{} { - return pt.values -} - -func (pt *PubTree) Comment() string { - return pt.comment -} - -func (pt *PubTree) Commented() bool { - return pt.commented -} - -func (pt *PubTree) Inline() bool { - return pt.inline -} - -func (pt *PubTree) SetValues(v map[string]interface{}) { - pt.values = v -} - -func (pt *PubTree) SetComment(c string) { - pt.comment = c -} - -func (pt *PubTree) SetCommented(c bool) { - pt.commented = c -} - -func (pt *PubTree) SetInline(i bool) { - pt.inline = i -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 80353500a0..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,155 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
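// Because PubTree and PubTOMLValue above are type aliases, the accessors they
// define are callable on any *Tree, exposing the raw node map the rest of
// this package manipulates. A small illustration:
package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
)

func main() {
	tree, err := toml.Load("answer = 42")
	if err != nil {
		panic(err)
	}
	for k, v := range tree.Values() {
		// Leaf keys map to the package's wrapped internal value type.
		fmt.Printf("%s -> %T\n", k, v)
	}
}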
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - case []interface{}: - value := reflect.ValueOf(original) - length := value.Len() - arrayValue := reflect.MakeSlice(value.Type(), 0, length) - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return arrayValue.Interface(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, 
err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index c9afbdab76..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,552 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "math/big" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type valueComplexity int - -const ( - valueSimple valueComplexity = iota + 1 - valueComplex -) - -type sortNode struct { - key string - complexity valueComplexity -} - -// Encodes a string to a TOML-compliant multi-line string value -// This function is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string, commented string) string { - var b bytes.Buffer - adjacentQuoteCount := 0 - - b.WriteString(commented) - for i, rr := range value { - if rr != '"' { - adjacentQuoteCount = 0 - } else { - adjacentQuoteCount++ - } - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n" + commented) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - if adjacentQuoteCount >= 3 || i == len(value)-1 { - adjacentQuoteCount = 0 - b.WriteString(`\"`) - } else { - b.WriteString(`"`) - } - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlTreeStringRepresentation(t *Tree, ord MarshalOrder) (string, error) { - var orderedVals []sortNode - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - var values []string - for _, node := range orderedVals { - k := node.key - v := t.values[k] - - repr, err := tomlValueStringRepresentation(v, "", "", ord, false) - if err != nil { - return "", err - } - values = append(values, quoteKeyIfNeeded(k)+" = "+repr) - } - return "{ " + strings.Join(values, ", ") + " }", nil -} - -func tomlValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. 
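Deleted above, encodeTomlString is the core of v1's basic-string output: named escapes for the common control characters, quote and backslash, and \uXXXX for the rest. Below is a minimal standalone sketch of those rules, with one deliberate difference: it escapes every code point below 0x20, per the TOML spec, whereas the deleted code's `intRr < 0x001F` comparison left 0x1F unescaped.

```go
package main

import (
	"fmt"
	"strings"
)

// escapeTomlBasicString mirrors the escaping rules of the deleted
// encodeTomlString: named escapes for common control characters,
// quote and backslash, and \uXXXX for everything else below 0x20.
func escapeTomlBasicString(value string) string {
	var b strings.Builder
	for _, r := range value {
		switch r {
		case '\b':
			b.WriteString(`\b`)
		case '\t':
			b.WriteString(`\t`)
		case '\n':
			b.WriteString(`\n`)
		case '\f':
			b.WriteString(`\f`)
		case '\r':
			b.WriteString(`\r`)
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			if r < 0x20 {
				b.WriteString(fmt.Sprintf(`\u%04X`, r))
			} else {
				b.WriteRune(r)
			}
		}
	}
	return b.String()
}

func main() {
	fmt.Println(escapeTomlBasicString("tab\there\x01end"))
	// prints: tab\there\u0001end
}
```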
- tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Default bit length is full 64 - bits := 64 - // Float panics if nan is used - if !math.IsNaN(value) { - // if 32 bit accuracy is enough to exactly show, use 32 - _, acc := big.NewFloat(value).Float32() - if acc == big.Exact { - bits = 32 - } - } - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil - case string: - if tv.multiline { - if tv.literal { - b := strings.Builder{} - b.WriteString("'''\n") - b.Write([]byte(value)) - b.WriteString("\n'''") - return b.String(), nil - } else { - return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil - } - } - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return string(b), nil - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case LocalDate: - return value.String(), nil - case LocalDateTime: - return value.String(), nil - case LocalTime: - return value.String(), nil - case *Tree: - return tomlTreeStringRepresentation(value, ord) - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(commented + value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + commented + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ", ") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func getTreeArrayLine(trees []*Tree) (line int) { - // Prevent returning 0 for empty trees - line = int(^uint(0) >> 1) - // get lowest line number >= 0 - for _, tv := range trees { - if tv.position.Line < line || line == 0 { - line = tv.position.Line - } - } - return -} - -func sortByLines(t *Tree) (vals []sortNode) { - var ( - line int - lines []int - tv *Tree - tom *tomlValue - node sortNode - ) - vals = make([]sortNode, 0) - m := make(map[int]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree: - tv = v.(*Tree) - line = tv.position.Line - node = sortNode{key: k, complexity: valueComplex} - case []*Tree: - line = getTreeArrayLine(v.([]*Tree)) - node = sortNode{key: k, complexity: valueComplex} - default: - tom = v.(*tomlValue) - line = tom.position.Line - node = sortNode{key: k, complexity: valueSimple} - } - lines = append(lines, line) - vals = append(vals, node) - m[line] = node - } - sort.Ints(lines) - - for i, line := range lines { - vals[i] = m[line] - } - - return vals -} - -func sortAlphabetical(t *Tree) (vals []sortNode) { - var ( - node sortNode - simpVals []string - compVals []string - ) - 
vals = make([]sortNode, 0) - m := make(map[string]sortNode) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - node = sortNode{key: k, complexity: valueComplex} - compVals = append(compVals, node.key) - default: - node = sortNode{key: k, complexity: valueSimple} - simpVals = append(simpVals, node.key) - } - vals = append(vals, node) - m[node.key] = node - } - - // Simples first to match previous implementation - sort.Strings(simpVals) - i := 0 - for _, key := range simpVals { - vals[i] = m[key] - i++ - } - - sort.Strings(compVals) - for _, key := range compVals { - vals[i] = m[key] - i++ - } - - return vals -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, " ", false, false) -} - -func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord MarshalOrder, indentString string, compactComments, parentCommented bool) (int64, error) { - var orderedVals []sortNode - - switch ord { - case OrderPreserve: - orderedVals = sortByLines(t) - default: - orderedVals = sortAlphabetical(t) - } - - for _, node := range orderedVals { - switch node.complexity { - case valueComplex: - k := node.key - v := t.values[k] - - combinedKey := quoteKeyIfNeeded(k) - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if parentCommented || t.commented || tv.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || tv.commented) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - var commented string - if parentCommented || t.commented || subTree.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeToOrdered(w, indent+indentString, combinedKey, bytesCount, arraysOneElementPerLine, ord, indentString, compactComments, parentCommented || t.commented || subTree.commented) - if err != nil { - return bytesCount, err - } - } - } - default: // Simple - k := node.key - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - var commented string - if parentCommented || t.commented || v.commented { - commented = "# " - } - repr, err := tomlValueStringRepresentation(v, 
commented, indent, ord, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - if !compactComments { - writtenBytesCountComment, errc := writeStrings(w, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCountComment, errc := writeStrings(w, indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - quotedKey := quoteKeyIfNeeded(k) - writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - } - - return bytesCount, nil -} - -// quote a key if it does not fit the bare key format (A-Za-z0-9_-) -// quoted keys use the same rules as strings -func quoteKeyIfNeeded(k string) string { - // when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain - // keys that have already been quoted. - // not an ideal situation, but good enough of a stop gap. - if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' { - return k - } - isBare := true - for _, r := range k { - if !isValidBareChar(r) { - isBare = false - break - } - } - if isBare { - return k - } - return quoteKey(k) -} - -func quoteKey(k string) string { - return "\"" + encodeTomlString(k) + "\"" -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - b, err := t.Marshal() - if err != nil { - return "", err - } - return string(b), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = tomlValueToGo(node.value) - } - } - return result -} - -func tomlValueToGo(v interface{}) interface{} { - if tree, ok := v.(*Tree); ok { - return tree.ToMap() - } - - rv := reflect.ValueOf(v) - - if rv.Kind() != reflect.Slice { - return v - } - values := make([]interface{}, rv.Len()) - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - values[i] = tomlValueToGo(item) - } - return values -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go b/vendor/github.com/pelletier/go-toml/tomltree_writepub.go deleted file mode 100644 index fa326308cf..0000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_writepub.go +++ /dev/null @@ -1,6 +0,0 @@ -package toml - -// ValueStringRepresentation transforms an interface{} value into its toml string representation. -func ValueStringRepresentation(v interface{}, commented string, indent string, ord MarshalOrder, arraysOneElementPerLine bool) (string, error) { - return tomlValueStringRepresentation(v, commented, indent, ord, arraysOneElementPerLine) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/README.md b/vendor/github.com/pelletier/go-toml/v2/README.md index a63c3a7967..9f8439cc7d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/README.md +++ b/vendor/github.com/pelletier/go-toml/v2/README.md @@ -140,6 +140,17 @@ fmt.Println(string(b)) [marshal]: https://pkg.go.dev/github.com/pelletier/go-toml/v2#Marshal +## Unstable API + +These APIs do not yet follow the backward compatibility guarantees of this +library. They provide early access to features that may have rough edges or an +API subject to change. + +### Parser + +Parser is the unstable API that allows iterative parsing of a TOML document at +the AST level. See https://pkg.go.dev/github.com/pelletier/go-toml/v2/unstable.
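The README addition stops at a link, so here is a minimal sketch of what iterative parsing with the unstable API looks like. Reset, Expression, Data, and Raw all appear in the hunks below; NextExpression and Error are assumed from the package's documented surface rather than shown in this diff.

```go
package main

import (
	"fmt"

	"github.com/pelletier/go-toml/v2/unstable"
)

func main() {
	doc := []byte("hello = 'world'\n\n[table]\nkey = 1\n")

	p := unstable.Parser{}
	p.Reset(doc)

	// Each iteration yields one top-level expression:
	// a KeyValue, Table, or ArrayTable node.
	for p.NextExpression() {
		e := p.Expression()
		fmt.Println(e.Kind)
	}
	if err := p.Error(); err != nil {
		panic(err)
	}
}
```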
+ ## Benchmarks Execution time speedup compared to other Go TOML libraries: diff --git a/vendor/github.com/pelletier/go-toml/v2/decode.go b/vendor/github.com/pelletier/go-toml/v2/decode.go index 4af965360a..3a860d0f6a 100644 --- a/vendor/github.com/pelletier/go-toml/v2/decode.go +++ b/vendor/github.com/pelletier/go-toml/v2/decode.go @@ -5,6 +5,8 @@ import ( "math" "strconv" "time" + + "github.com/pelletier/go-toml/v2/unstable" ) func parseInteger(b []byte) (int64, error) { @@ -32,7 +34,7 @@ func parseLocalDate(b []byte) (LocalDate, error) { var date LocalDate if len(b) != 10 || b[4] != '-' || b[7] != '-' { - return date, newDecodeError(b, "dates are expected to have the format YYYY-MM-DD") + return date, unstable.NewParserError(b, "dates are expected to have the format YYYY-MM-DD") } var err error @@ -53,7 +55,7 @@ func parseLocalDate(b []byte) (LocalDate, error) { } if !isValidDate(date.Year, date.Month, date.Day) { - return LocalDate{}, newDecodeError(b, "impossible date") + return LocalDate{}, unstable.NewParserError(b, "impossible date") } return date, nil @@ -64,7 +66,7 @@ func parseDecimalDigits(b []byte) (int, error) { for i, c := range b { if c < '0' || c > '9' { - return 0, newDecodeError(b[i:i+1], "expected digit (0-9)") + return 0, unstable.NewParserError(b[i:i+1], "expected digit (0-9)") } v *= 10 v += int(c - '0') @@ -97,7 +99,7 @@ func parseDateTime(b []byte) (time.Time, error) { } else { const dateTimeByteLen = 6 if len(b) != dateTimeByteLen { - return time.Time{}, newDecodeError(b, "invalid date-time timezone") + return time.Time{}, unstable.NewParserError(b, "invalid date-time timezone") } var direction int switch b[0] { @@ -106,11 +108,11 @@ func parseDateTime(b []byte) (time.Time, error) { case '+': direction = +1 default: - return time.Time{}, newDecodeError(b[:1], "invalid timezone offset character") + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset character") } if b[3] != ':' { - return time.Time{}, newDecodeError(b[3:4], "expected a : separator") + return time.Time{}, unstable.NewParserError(b[3:4], "expected a : separator") } hours, err := parseDecimalDigits(b[1:3]) @@ -118,7 +120,7 @@ func parseDateTime(b []byte) (time.Time, error) { return time.Time{}, err } if hours > 23 { - return time.Time{}, newDecodeError(b[:1], "invalid timezone offset hours") + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset hours") } minutes, err := parseDecimalDigits(b[4:6]) @@ -126,7 +128,7 @@ func parseDateTime(b []byte) (time.Time, error) { return time.Time{}, err } if minutes > 59 { - return time.Time{}, newDecodeError(b[:1], "invalid timezone offset minutes") + return time.Time{}, unstable.NewParserError(b[:1], "invalid timezone offset minutes") } seconds := direction * (hours*3600 + minutes*60) @@ -139,7 +141,7 @@ func parseDateTime(b []byte) (time.Time, error) { } if len(b) > 0 { - return time.Time{}, newDecodeError(b, "extra bytes at the end of the timezone") + return time.Time{}, unstable.NewParserError(b, "extra bytes at the end of the timezone") } t := time.Date( @@ -160,7 +162,7 @@ func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { const localDateTimeByteMinLen = 11 if len(b) < localDateTimeByteMinLen { - return dt, nil, newDecodeError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") + return dt, nil, unstable.NewParserError(b, "local datetimes are expected to have the format YYYY-MM-DDTHH:MM:SS[.NNNNNNNNN]") } date, err := parseLocalDate(b[:10]) @@ -171,7 +173,7 @@ 
func parseLocalDateTime(b []byte) (LocalDateTime, []byte, error) { sep := b[10] if sep != 'T' && sep != ' ' && sep != 't' { - return dt, nil, newDecodeError(b[10:11], "datetime separator is expected to be T or a space") + return dt, nil, unstable.NewParserError(b[10:11], "datetime separator is expected to be T or a space") } t, rest, err := parseLocalTime(b[11:]) @@ -195,7 +197,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { // check if b matches to have expected format HH:MM:SS[.NNNNNN] const localTimeByteLen = 8 if len(b) < localTimeByteLen { - return t, nil, newDecodeError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") + return t, nil, unstable.NewParserError(b, "times are expected to have the format HH:MM:SS[.NNNNNN]") } var err error @@ -206,10 +208,10 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { } if t.Hour > 23 { - return t, nil, newDecodeError(b[0:2], "hour cannot be greater 23") + return t, nil, unstable.NewParserError(b[0:2], "hour cannot be greater 23") } if b[2] != ':' { - return t, nil, newDecodeError(b[2:3], "expecting colon between hours and minutes") + return t, nil, unstable.NewParserError(b[2:3], "expecting colon between hours and minutes") } t.Minute, err = parseDecimalDigits(b[3:5]) @@ -217,10 +219,10 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { return t, nil, err } if t.Minute > 59 { - return t, nil, newDecodeError(b[3:5], "minutes cannot be greater 59") + return t, nil, unstable.NewParserError(b[3:5], "minutes cannot be greater 59") } if b[5] != ':' { - return t, nil, newDecodeError(b[5:6], "expecting colon between minutes and seconds") + return t, nil, unstable.NewParserError(b[5:6], "expecting colon between minutes and seconds") } t.Second, err = parseDecimalDigits(b[6:8]) @@ -229,7 +231,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { } if t.Second > 60 { - return t, nil, newDecodeError(b[6:8], "seconds cannot be greater 60") + return t, nil, unstable.NewParserError(b[6:8], "seconds cannot be greater 60") } b = b[8:] @@ -242,7 +244,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { for i, c := range b[1:] { if !isDigit(c) { if i == 0 { - return t, nil, newDecodeError(b[0:1], "need at least one digit after fraction point") + return t, nil, unstable.NewParserError(b[0:1], "need at least one digit after fraction point") } break } @@ -266,7 +268,7 @@ func parseLocalTime(b []byte) (LocalTime, []byte, error) { } if precision == 0 { - return t, nil, newDecodeError(b[:1], "nanoseconds need at least one digit") + return t, nil, unstable.NewParserError(b[:1], "nanoseconds need at least one digit") } t.Nanosecond = frac * nspow[precision] @@ -289,24 +291,24 @@ func parseFloat(b []byte) (float64, error) { } if cleaned[0] == '.' { - return 0, newDecodeError(b, "float cannot start with a dot") + return 0, unstable.NewParserError(b, "float cannot start with a dot") } if cleaned[len(cleaned)-1] == '.' { - return 0, newDecodeError(b, "float cannot end with a dot") + return 0, unstable.NewParserError(b, "float cannot end with a dot") } dotAlreadySeen := false for i, c := range cleaned { if c == '.' 
{ if dotAlreadySeen { - return 0, newDecodeError(b[i:i+1], "float can have at most one decimal point") + return 0, unstable.NewParserError(b[i:i+1], "float can have at most one decimal point") } if !isDigit(cleaned[i-1]) { - return 0, newDecodeError(b[i-1:i+1], "float decimal point must be preceded by a digit") + return 0, unstable.NewParserError(b[i-1:i+1], "float decimal point must be preceded by a digit") } if !isDigit(cleaned[i+1]) { - return 0, newDecodeError(b[i:i+2], "float decimal point must be followed by a digit") + return 0, unstable.NewParserError(b[i:i+2], "float decimal point must be followed by a digit") } dotAlreadySeen = true } @@ -317,12 +319,12 @@ func parseFloat(b []byte) (float64, error) { start = 1 } if cleaned[start] == '0' && isDigit(cleaned[start+1]) { - return 0, newDecodeError(b, "float integer part cannot have leading zeroes") + return 0, unstable.NewParserError(b, "float integer part cannot have leading zeroes") } f, err := strconv.ParseFloat(string(cleaned), 64) if err != nil { - return 0, newDecodeError(b, "unable to parse float: %w", err) + return 0, unstable.NewParserError(b, "unable to parse float: %w", err) } return f, nil @@ -336,7 +338,7 @@ func parseIntHex(b []byte) (int64, error) { i, err := strconv.ParseInt(string(cleaned), 16, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse hexadecimal number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse hexadecimal number: %w", err) } return i, nil @@ -350,7 +352,7 @@ func parseIntOct(b []byte) (int64, error) { i, err := strconv.ParseInt(string(cleaned), 8, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse octal number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse octal number: %w", err) } return i, nil @@ -364,7 +366,7 @@ func parseIntBin(b []byte) (int64, error) { i, err := strconv.ParseInt(string(cleaned), 2, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse binary number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse binary number: %w", err) } return i, nil @@ -387,12 +389,12 @@ func parseIntDec(b []byte) (int64, error) { } if len(cleaned) > startIdx+1 && cleaned[startIdx] == '0' { - return 0, newDecodeError(b, "leading zero not allowed on decimal number") + return 0, unstable.NewParserError(b, "leading zero not allowed on decimal number") } i, err := strconv.ParseInt(string(cleaned), 10, 64) if err != nil { - return 0, newDecodeError(b, "couldn't parse decimal number: %w", err) + return 0, unstable.NewParserError(b, "couldn't parse decimal number: %w", err) } return i, nil @@ -409,11 +411,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { } if b[start] == '_' { - return nil, newDecodeError(b[start:start+1], "number cannot start with underscore") + return nil, unstable.NewParserError(b[start:start+1], "number cannot start with underscore") } if b[len(b)-1] == '_' { - return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") } // fast path @@ -435,7 +437,7 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) { c := b[i] if c == '_' { if !before { - return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") } before = false } else { @@ -449,11 +451,11 @@ func checkAndRemoveUnderscoresIntegers(b []byte) ([]byte, error) 
{ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { if b[0] == '_' { - return nil, newDecodeError(b[0:1], "number cannot start with underscore") + return nil, unstable.NewParserError(b[0:1], "number cannot start with underscore") } if b[len(b)-1] == '_' { - return nil, newDecodeError(b[len(b)-1:], "number cannot end with underscore") + return nil, unstable.NewParserError(b[len(b)-1:], "number cannot end with underscore") } // fast path @@ -476,10 +478,10 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { switch c { case '_': if !before { - return nil, newDecodeError(b[i-1:i+1], "number must have at least one digit between underscores") + return nil, unstable.NewParserError(b[i-1:i+1], "number must have at least one digit between underscores") } if i < len(b)-1 && (b[i+1] == 'e' || b[i+1] == 'E') { - return nil, newDecodeError(b[i+1:i+2], "cannot have underscore before exponent") + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore before exponent") } before = false case '+', '-': @@ -488,15 +490,15 @@ func checkAndRemoveUnderscoresFloats(b []byte) ([]byte, error) { before = false case 'e', 'E': if i < len(b)-1 && b[i+1] == '_' { - return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after exponent") + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after exponent") } cleaned = append(cleaned, c) case '.': if i < len(b)-1 && b[i+1] == '_' { - return nil, newDecodeError(b[i+1:i+2], "cannot have underscore after decimal point") + return nil, unstable.NewParserError(b[i+1:i+2], "cannot have underscore after decimal point") } if i > 0 && b[i-1] == '_' { - return nil, newDecodeError(b[i-1:i], "cannot have underscore before decimal point") + return nil, unstable.NewParserError(b[i-1:i], "cannot have underscore before decimal point") } cleaned = append(cleaned, c) default: @@ -542,3 +544,7 @@ func daysIn(m int, year int) int { func isLeap(year int) bool { return year%4 == 0 && (year%100 != 0 || year%400 == 0) } + +func isDigit(r byte) bool { + return r >= '0' && r <= '9' +} diff --git a/vendor/github.com/pelletier/go-toml/v2/errors.go b/vendor/github.com/pelletier/go-toml/v2/errors.go index 2e7f0ffdf8..309733f1f9 100644 --- a/vendor/github.com/pelletier/go-toml/v2/errors.go +++ b/vendor/github.com/pelletier/go-toml/v2/errors.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/pelletier/go-toml/v2/internal/danger" + "github.com/pelletier/go-toml/v2/unstable" ) // DecodeError represents an error encountered during the parsing or decoding @@ -55,25 +56,6 @@ func (s *StrictMissingError) String() string { type Key []string -// internal version of DecodeError that is used as the base to create a -// DecodeError with full context. -type decodeError struct { - highlight []byte - message string - key Key // optional -} - -func (de *decodeError) Error() string { - return de.message -} - -func newDecodeError(highlight []byte, format string, args ...interface{}) error { - return &decodeError{ - highlight: highlight, - message: fmt.Errorf(format, args...).Error(), - } -} - // Error returns the error message contained in the DecodeError. func (e *DecodeError) Error() string { return "toml: " + e.message @@ -105,12 +87,12 @@ func (e *DecodeError) Key() Key { // highlight can be freely deallocated. 
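The underscore validators above feed the reworked error path: a low-level unstable.ParserError is wrapped into the user-facing DecodeError with line/column context by wrapDecodeError below. A sketch of how that surfaces through the public API, assuming DecodeError's documented Position method (only the Error method and the message-building internals appear in this diff):

```go
package main

import (
	"errors"
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

func main() {
	var v map[string]interface{}

	// "1__0" violates the rule enforced above: at least one
	// digit is required between underscores.
	err := toml.Unmarshal([]byte("a = 1__0\n"), &v)

	var derr *toml.DecodeError
	if errors.As(err, &derr) {
		row, col := derr.Position()
		fmt.Printf("error at %d:%d: %v\n", row, col, derr)
	}
}
```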
// //nolint:funlen -func wrapDecodeError(document []byte, de *decodeError) *DecodeError { - offset := danger.SubsliceOffset(document, de.highlight) +func wrapDecodeError(document []byte, de *unstable.ParserError) *DecodeError { + offset := danger.SubsliceOffset(document, de.Highlight) errMessage := de.Error() errLine, errColumn := positionAtEnd(document[:offset]) - before, after := linesOfContext(document, de.highlight, offset, 3) + before, after := linesOfContext(document, de.Highlight, offset, 3) var buf strings.Builder @@ -140,7 +122,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError { buf.Write(before[0]) } - buf.Write(de.highlight) + buf.Write(de.Highlight) if len(after) > 0 { buf.Write(after[0]) @@ -158,7 +140,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError { buf.WriteString(strings.Repeat(" ", len(before[0]))) } - buf.WriteString(strings.Repeat("~", len(de.highlight))) + buf.WriteString(strings.Repeat("~", len(de.Highlight))) if len(errMessage) > 0 { buf.WriteString(" ") @@ -183,7 +165,7 @@ func wrapDecodeError(document []byte, de *decodeError) *DecodeError { message: errMessage, line: errLine, column: errColumn, - key: de.key, + key: de.Key, human: buf.String(), } } diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go b/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go deleted file mode 100644 index 120f16e5ce..0000000000 --- a/vendor/github.com/pelletier/go-toml/v2/internal/ast/builder.go +++ /dev/null @@ -1,51 +0,0 @@ -package ast - -type Reference int - -const InvalidReference Reference = -1 - -func (r Reference) Valid() bool { - return r != InvalidReference -} - -type Builder struct { - tree Root - lastIdx int -} - -func (b *Builder) Tree() *Root { - return &b.tree -} - -func (b *Builder) NodeAt(ref Reference) *Node { - return b.tree.at(ref) -} - -func (b *Builder) Reset() { - b.tree.nodes = b.tree.nodes[:0] - b.lastIdx = 0 -} - -func (b *Builder) Push(n Node) Reference { - b.lastIdx = len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - return Reference(b.lastIdx) -} - -func (b *Builder) PushAndChain(n Node) Reference { - newIdx := len(b.tree.nodes) - b.tree.nodes = append(b.tree.nodes, n) - if b.lastIdx >= 0 { - b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx - } - b.lastIdx = newIdx - return Reference(b.lastIdx) -} - -func (b *Builder) AttachChild(parent Reference, child Reference) { - b.tree.nodes[parent].child = int(child) - int(parent) -} - -func (b *Builder) Chain(from Reference, to Reference) { - b.tree.nodes[from].next = int(to) - int(from) -} diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go new file mode 100644 index 0000000000..80f698db4b --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/ascii.go @@ -0,0 +1,42 @@ +package characters + +var invalidAsciiTable = [256]bool{ + 0x00: true, + 0x01: true, + 0x02: true, + 0x03: true, + 0x04: true, + 0x05: true, + 0x06: true, + 0x07: true, + 0x08: true, + // 0x09 TAB + // 0x0A LF + 0x0B: true, + 0x0C: true, + // 0x0D CR + 0x0E: true, + 0x0F: true, + 0x10: true, + 0x11: true, + 0x12: true, + 0x13: true, + 0x14: true, + 0x15: true, + 0x16: true, + 0x17: true, + 0x18: true, + 0x19: true, + 0x1A: true, + 0x1B: true, + 0x1C: true, + 0x1D: true, + 0x1E: true, + 0x1F: true, + // 0x20 - 0x7E Printable ASCII characters + 0x7F: true, +} + +func InvalidAscii(b byte) bool { + return invalidAsciiTable[b] +} diff --git 
a/test/performance/vendor/github.com/pelletier/go-toml/v2/utf8.go b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go similarity index 87% rename from test/performance/vendor/github.com/pelletier/go-toml/v2/utf8.go rename to vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go index d47a4f20c5..db4f45acbf 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/utf8.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/characters/utf8.go @@ -1,4 +1,4 @@ -package toml +package characters import ( "unicode/utf8" @@ -32,7 +32,7 @@ func (u utf8Err) Zero() bool { // 0x9 => tab, ok // 0xA - 0x1F => invalid // 0x7F => invalid -func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { +func Utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { // Fast path. Check for and skip 8 bytes of ASCII characters per iteration. offset := 0 for len(p) >= 8 { @@ -48,7 +48,7 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { } for i, b := range p[:8] { - if invalidAscii(b) { + if InvalidAscii(b) { err.Index = offset + i err.Size = 1 return @@ -62,7 +62,7 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { for i := 0; i < n; { pi := p[i] if pi < utf8.RuneSelf { - if invalidAscii(pi) { + if InvalidAscii(pi) { err.Index = offset + i err.Size = 1 return @@ -106,11 +106,11 @@ func utf8TomlValidAlreadyEscaped(p []byte) (err utf8Err) { } // Return the size of the next rune if valid, 0 otherwise. -func utf8ValidNext(p []byte) int { +func Utf8ValidNext(p []byte) int { c := p[0] if c < utf8.RuneSelf { - if invalidAscii(c) { + if InvalidAscii(c) { return 0 } return 1 @@ -140,47 +140,6 @@ func utf8ValidNext(p []byte) int { return size } -var invalidAsciiTable = [256]bool{ - 0x00: true, - 0x01: true, - 0x02: true, - 0x03: true, - 0x04: true, - 0x05: true, - 0x06: true, - 0x07: true, - 0x08: true, - // 0x09 TAB - // 0x0A LF - 0x0B: true, - 0x0C: true, - // 0x0D CR - 0x0E: true, - 0x0F: true, - 0x10: true, - 0x11: true, - 0x12: true, - 0x13: true, - 0x14: true, - 0x15: true, - 0x16: true, - 0x17: true, - 0x18: true, - 0x19: true, - 0x1A: true, - 0x1B: true, - 0x1C: true, - 0x1D: true, - 0x1E: true, - 0x1F: true, - // 0x20 - 0x7E Printable ASCII characters - 0x7F: true, -} - -func invalidAscii(b byte) bool { - return invalidAsciiTable[b] -} - // acceptRange gives the range of valid values for the second byte in a UTF-8 // sequence. type acceptRange struct { diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go index 7c148f48d4..149b17f538 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/key.go @@ -1,8 +1,6 @@ package tracker -import ( - "github.com/pelletier/go-toml/v2/internal/ast" -) +import "github.com/pelletier/go-toml/v2/unstable" // KeyTracker is a tracker that keeps track of the current Key as the AST is // walked. @@ -11,19 +9,19 @@ type KeyTracker struct { } // UpdateTable sets the state of the tracker with the AST table node. -func (t *KeyTracker) UpdateTable(node *ast.Node) { +func (t *KeyTracker) UpdateTable(node *unstable.Node) { t.reset() t.Push(node) } // UpdateArrayTable sets the state of the tracker with the AST array table node. -func (t *KeyTracker) UpdateArrayTable(node *ast.Node) { +func (t *KeyTracker) UpdateArrayTable(node *unstable.Node) { t.reset() t.Push(node) } // Push the given key on the stack. 
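The renamed characters package above keeps the hot validation loop: Utf8TomlValidAlreadyEscaped scans 8 bytes per iteration and, as long as they are pure ASCII, checks them against invalidAsciiTable before falling back to full rune decoding. Below is a simplified, table-free sketch of that structure; the real code uses the lookup table behind InvalidAscii and unrolled word-sized reads.

```go
package main

import "fmt"

// invalidASCII reports whether b is a byte TOML forbids in strings:
// control characters below 0x20 other than tab/LF/CR, plus DEL.
func invalidASCII(b byte) bool {
	return (b < 0x20 && b != '\t' && b != '\n' && b != '\r') || b == 0x7F
}

// asciiFastPath scans 8 bytes per iteration, returning how far the
// input is known-valid pure ASCII and whether a forbidden byte was
// hit. The real loop does the same before rune-aware validation.
func asciiFastPath(p []byte) (offset int, ok bool) {
	for len(p)-offset >= 8 {
		for i, b := range p[offset : offset+8] {
			if b >= 0x80 {
				// Non-ASCII: hand the rest to full UTF-8 validation.
				return offset + i, true
			}
			if invalidASCII(b) {
				return offset + i, false
			}
		}
		offset += 8
	}
	return offset, true
}

func main() {
	fmt.Println(asciiFastPath([]byte("plain ascii, nothing to escape")))
}
```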
-func (t *KeyTracker) Push(node *ast.Node) { +func (t *KeyTracker) Push(node *unstable.Node) { it := node.Key() for it.Next() { t.k = append(t.k, string(it.Node().Data)) @@ -31,7 +29,7 @@ func (t *KeyTracker) Push(node *ast.Node) { } // Pop key from stack. -func (t *KeyTracker) Pop(node *ast.Node) { +func (t *KeyTracker) Pop(node *unstable.Node) { it := node.Key() for it.Next() { t.k = t.k[:len(t.k)-1] diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go index a7ee05ba65..40e23f8304 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go +++ b/vendor/github.com/pelletier/go-toml/v2/internal/tracker/seen.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/unstable" ) type keyKind uint8 @@ -150,23 +150,23 @@ func (s *SeenTracker) setExplicitFlag(parentIdx int) { // CheckExpression takes a top-level node and checks that it does not contain // keys that have been seen in previous calls, and validates that types are // consistent. -func (s *SeenTracker) CheckExpression(node *ast.Node) error { +func (s *SeenTracker) CheckExpression(node *unstable.Node) error { if s.entries == nil { s.reset() } switch node.Kind { - case ast.KeyValue: + case unstable.KeyValue: return s.checkKeyValue(node) - case ast.Table: + case unstable.Table: return s.checkTable(node) - case ast.ArrayTable: + case unstable.ArrayTable: return s.checkArrayTable(node) default: panic(fmt.Errorf("this should not be a top level node type: %s", node.Kind)) } } -func (s *SeenTracker) checkTable(node *ast.Node) error { +func (s *SeenTracker) checkTable(node *unstable.Node) error { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -219,7 +219,7 @@ func (s *SeenTracker) checkTable(node *ast.Node) error { return nil } -func (s *SeenTracker) checkArrayTable(node *ast.Node) error { +func (s *SeenTracker) checkArrayTable(node *unstable.Node) error { if s.currentIdx >= 0 { s.setExplicitFlag(s.currentIdx) } @@ -267,7 +267,7 @@ func (s *SeenTracker) checkArrayTable(node *ast.Node) error { return nil } -func (s *SeenTracker) checkKeyValue(node *ast.Node) error { +func (s *SeenTracker) checkKeyValue(node *unstable.Node) error { parentIdx := s.currentIdx it := node.Key() @@ -297,26 +297,26 @@ func (s *SeenTracker) checkKeyValue(node *ast.Node) error { value := node.Value() switch value.Kind { - case ast.InlineTable: + case unstable.InlineTable: return s.checkInlineTable(value) - case ast.Array: + case unstable.Array: return s.checkArray(value) } return nil } -func (s *SeenTracker) checkArray(node *ast.Node) error { +func (s *SeenTracker) checkArray(node *unstable.Node) error { it := node.Children() for it.Next() { n := it.Node() switch n.Kind { - case ast.InlineTable: + case unstable.InlineTable: err := s.checkInlineTable(n) if err != nil { return err } - case ast.Array: + case unstable.Array: err := s.checkArray(n) if err != nil { return err @@ -326,7 +326,7 @@ func (s *SeenTracker) checkArray(node *ast.Node) error { return nil } -func (s *SeenTracker) checkInlineTable(node *ast.Node) error { +func (s *SeenTracker) checkInlineTable(node *unstable.Node) error { if pool.New == nil { pool.New = func() interface{} { return &SeenTracker{} diff --git a/vendor/github.com/pelletier/go-toml/v2/localtime.go b/vendor/github.com/pelletier/go-toml/v2/localtime.go index 30a31dcbde..a856bfdb0d 100644 --- a/vendor/github.com/pelletier/go-toml/v2/localtime.go +++ 
b/vendor/github.com/pelletier/go-toml/v2/localtime.go @@ -4,6 +4,8 @@ import ( "fmt" "strings" "time" + + "github.com/pelletier/go-toml/v2/unstable" ) // LocalDate represents a calendar day in no specific timezone. @@ -75,7 +77,7 @@ func (d LocalTime) MarshalText() ([]byte, error) { func (d *LocalTime) UnmarshalText(b []byte) error { res, left, err := parseLocalTime(b) if err == nil && len(left) != 0 { - err = newDecodeError(left, "extra characters") + err = unstable.NewParserError(left, "extra characters") } if err != nil { return err @@ -109,7 +111,7 @@ func (d LocalDateTime) MarshalText() ([]byte, error) { func (d *LocalDateTime) UnmarshalText(data []byte) error { res, left, err := parseLocalDateTime(data) if err == nil && len(left) != 0 { - err = newDecodeError(left, "extra characters") + err = unstable.NewParserError(left, "extra characters") } if err != nil { return err diff --git a/vendor/github.com/pelletier/go-toml/v2/marshaler.go b/vendor/github.com/pelletier/go-toml/v2/marshaler.go index acb288315b..07aceb9024 100644 --- a/vendor/github.com/pelletier/go-toml/v2/marshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/marshaler.go @@ -12,6 +12,8 @@ import ( "strings" "time" "unicode" + + "github.com/pelletier/go-toml/v2/internal/characters" ) // Marshal serializes a Go value as a TOML document. @@ -437,7 +439,7 @@ func (enc *Encoder) encodeString(b []byte, v string, options valueOptions) []byt func needsQuoting(v string) bool { // TODO: vectorize for _, b := range []byte(v) { - if b == '\'' || b == '\r' || b == '\n' || invalidAscii(b) { + if b == '\'' || b == '\r' || b == '\n' || characters.InvalidAscii(b) { return true } } diff --git a/vendor/github.com/pelletier/go-toml/v2/strict.go b/vendor/github.com/pelletier/go-toml/v2/strict.go index b7830d139c..802e7e4d15 100644 --- a/vendor/github.com/pelletier/go-toml/v2/strict.go +++ b/vendor/github.com/pelletier/go-toml/v2/strict.go @@ -1,9 +1,9 @@ package toml import ( - "github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" ) type strict struct { @@ -12,10 +12,10 @@ type strict struct { // Tracks the current key being processed. 
key tracker.KeyTracker - missing []decodeError + missing []unstable.ParserError } -func (s *strict) EnterTable(node *ast.Node) { +func (s *strict) EnterTable(node *unstable.Node) { if !s.Enabled { return } @@ -23,7 +23,7 @@ func (s *strict) EnterTable(node *ast.Node) { s.key.UpdateTable(node) } -func (s *strict) EnterArrayTable(node *ast.Node) { +func (s *strict) EnterArrayTable(node *unstable.Node) { if !s.Enabled { return } @@ -31,7 +31,7 @@ func (s *strict) EnterArrayTable(node *ast.Node) { s.key.UpdateArrayTable(node) } -func (s *strict) EnterKeyValue(node *ast.Node) { +func (s *strict) EnterKeyValue(node *unstable.Node) { if !s.Enabled { return } @@ -39,7 +39,7 @@ func (s *strict) EnterKeyValue(node *ast.Node) { s.key.Push(node) } -func (s *strict) ExitKeyValue(node *ast.Node) { +func (s *strict) ExitKeyValue(node *unstable.Node) { if !s.Enabled { return } @@ -47,27 +47,27 @@ func (s *strict) ExitKeyValue(node *ast.Node) { s.key.Pop(node) } -func (s *strict) MissingTable(node *ast.Node) { +func (s *strict) MissingTable(node *unstable.Node) { if !s.Enabled { return } - s.missing = append(s.missing, decodeError{ - highlight: keyLocation(node), - message: "missing table", - key: s.key.Key(), + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing table", + Key: s.key.Key(), }) } -func (s *strict) MissingField(node *ast.Node) { +func (s *strict) MissingField(node *unstable.Node) { if !s.Enabled { return } - s.missing = append(s.missing, decodeError{ - highlight: keyLocation(node), - message: "missing field", - key: s.key.Key(), + s.missing = append(s.missing, unstable.ParserError{ + Highlight: keyLocation(node), + Message: "missing field", + Key: s.key.Key(), }) } @@ -88,7 +88,7 @@ func (s *strict) Error(doc []byte) error { return err } -func keyLocation(node *ast.Node) []byte { +func keyLocation(node *unstable.Node) []byte { k := node.Key() hasOne := k.Next() diff --git a/vendor/github.com/pelletier/go-toml/v2/types.go b/vendor/github.com/pelletier/go-toml/v2/types.go index 630a454663..3c6b8fe570 100644 --- a/vendor/github.com/pelletier/go-toml/v2/types.go +++ b/vendor/github.com/pelletier/go-toml/v2/types.go @@ -6,9 +6,9 @@ import ( "time" ) -var timeType = reflect.TypeOf(time.Time{}) -var textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem() -var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() -var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) -var sliceInterfaceType = reflect.TypeOf([]interface{}{}) +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +var mapStringInterfaceType = reflect.TypeOf(map[string]interface{}(nil)) +var sliceInterfaceType = reflect.TypeOf([]interface{}(nil)) var stringType = reflect.TypeOf("") diff --git a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go index d0d7a72d08..70f6ec5720 100644 --- a/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go +++ b/vendor/github.com/pelletier/go-toml/v2/unmarshaler.go @@ -12,16 +12,16 @@ import ( "sync/atomic" "time" - "github.com/pelletier/go-toml/v2/internal/ast" "github.com/pelletier/go-toml/v2/internal/danger" "github.com/pelletier/go-toml/v2/internal/tracker" + "github.com/pelletier/go-toml/v2/unstable" ) // Unmarshal deserializes a TOML document into a Go value. 
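The types.go hunk above swaps the `reflect.TypeOf(new(T)).Elem()`-style initializers for `reflect.TypeOf((*T)(nil)).Elem()`. Both yield the reflect.Type of the interface itself rather than of some concrete value stored in it, but the typed-nil form avoids allocating. A small illustration:

```go
package main

import (
	"encoding"
	"fmt"
	"reflect"
	"time"
)

func main() {
	// A typed nil pointer carries the static type *T without
	// allocating a T; Elem() then yields the interface type itself.
	tm := reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()

	fmt.Println(tm)                                         // encoding.TextMarshaler
	fmt.Println(reflect.TypeOf(time.Time{}).Implements(tm)) // true: time.Time has MarshalText
	fmt.Println(reflect.TypeOf(42).Implements(tm))          // false
}
```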
// // It is a shortcut for Decoder.Decode() with the default options. func Unmarshal(data []byte, v interface{}) error { - p := parser{} + p := unstable.Parser{} p.Reset(data) d := decoder{p: &p} @@ -101,7 +101,7 @@ func (d *Decoder) Decode(v interface{}) error { return fmt.Errorf("toml: %w", err) } - p := parser{} + p := unstable.Parser{} p.Reset(b) dec := decoder{ p: &p, @@ -115,7 +115,7 @@ func (d *Decoder) Decode(v interface{}) error { type decoder struct { // Which parser instance in use for this decoding session. - p *parser + p *unstable.Parser // Flag indicating that the current expression is stashed. // If set to true, calling nextExpr will not actually pull a new expression @@ -157,7 +157,7 @@ func (d *decoder) typeMismatchError(toml string, target reflect.Type) error { return fmt.Errorf("toml: cannot decode TOML %s into a Go value of type %s", toml, target) } -func (d *decoder) expr() *ast.Node { +func (d *decoder) expr() *unstable.Node { return d.p.Expression() } @@ -208,12 +208,12 @@ func (d *decoder) FromParser(v interface{}) error { err := d.fromParser(r) if err == nil { - return d.strict.Error(d.p.data) + return d.strict.Error(d.p.Data()) } - var e *decodeError + var e *unstable.ParserError if errors.As(err, &e) { - return wrapDecodeError(d.p.data, e) + return wrapDecodeError(d.p.Data(), e) } return err @@ -234,16 +234,16 @@ func (d *decoder) fromParser(root reflect.Value) error { Rules for the unmarshal code: - The stack is used to keep track of which values need to be set where. -- handle* functions <=> switch on a given ast.Kind. +- handle* functions <=> switch on a given unstable.Kind. - unmarshalX* functions need to unmarshal a node of kind X. - An "object" is either a struct or a map. */ -func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { +func (d *decoder) handleRootExpression(expr *unstable.Node, v reflect.Value) error { var x reflect.Value var err error - if !(d.skipUntilTable && expr.Kind == ast.KeyValue) { + if !(d.skipUntilTable && expr.Kind == unstable.KeyValue) { err = d.seen.CheckExpression(expr) if err != nil { return err @@ -251,16 +251,16 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { } switch expr.Kind { - case ast.KeyValue: + case unstable.KeyValue: if d.skipUntilTable { return nil } x, err = d.handleKeyValue(expr, v) - case ast.Table: + case unstable.Table: d.skipUntilTable = false d.strict.EnterTable(expr) x, err = d.handleTable(expr.Key(), v) - case ast.ArrayTable: + case unstable.ArrayTable: d.skipUntilTable = false d.strict.EnterArrayTable(expr) x, err = d.handleArrayTable(expr.Key(), v) @@ -269,7 +269,7 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { } if d.skipUntilTable { - if expr.Kind == ast.Table || expr.Kind == ast.ArrayTable { + if expr.Kind == unstable.Table || expr.Kind == unstable.ArrayTable { d.strict.MissingTable(expr) } } else if err == nil && x.IsValid() { @@ -279,14 +279,14 @@ func (d *decoder) handleRootExpression(expr *ast.Node, v reflect.Value) error { return err } -func (d *decoder) handleArrayTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if key.Next() { return d.handleArrayTablePart(key, v) } return d.handleKeyValues(v) } -func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTableCollectionLast(key unstable.Iterator, v 
reflect.Value) (reflect.Value, error) { switch v.Kind() { case reflect.Interface: elem := v.Elem() @@ -339,13 +339,13 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val case reflect.Array: idx := d.arrayIndex(true, v) if idx >= v.Len() { - return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) } elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) return v, err default: - return reflect.Value{}, fmt.Errorf("toml: cannot decode array table into a %s", v.Type()) + return reflect.Value{}, d.typeMismatchError("array table", v.Type()) } } @@ -353,7 +353,7 @@ func (d *decoder) handleArrayTableCollectionLast(key ast.Iterator, v reflect.Val // evaluated like a normal key, but if it returns a collection, it also needs to // point to the last element of the collection. Unless it is the last part of // the key, then it needs to create a new element at the end. -func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTableCollection(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if key.IsLast() { return d.handleArrayTableCollectionLast(key, v) } @@ -390,7 +390,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) case reflect.Array: idx := d.arrayIndex(false, v) if idx >= v.Len() { - return v, fmt.Errorf("toml: cannot decode array table into %s at position %d", v.Type(), idx) + return v, fmt.Errorf("%s at position %d", d.typeMismatchError("array table", v.Type()), idx) } elem := v.Index(idx) _, err := d.handleArrayTable(key, elem) @@ -400,7 +400,7 @@ func (d *decoder) handleArrayTableCollection(key ast.Iterator, v reflect.Value) return d.handleArrayTable(key, v) } -func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { +func (d *decoder) handleKeyPart(key unstable.Iterator, v reflect.Value, nextFn handlerFn, makeFn valueMakerFn) (reflect.Value, error) { var rv reflect.Value // First, dispatch over v to make sure it is a valid object. @@ -518,7 +518,7 @@ func (d *decoder) handleKeyPart(key ast.Iterator, v reflect.Value, nextFn handle // HandleArrayTablePart navigates the Go structure v using the key v. It is // only used for the prefix (non-last) parts of an array-table. When // encountering a collection, it should go to the last element. -func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleArrayTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { var makeFn valueMakerFn if key.IsLast() { makeFn = makeSliceInterface @@ -530,10 +530,10 @@ func (d *decoder) handleArrayTablePart(key ast.Iterator, v reflect.Value) (refle // HandleTable returns a reference when it has checked the next expression but // cannot handle it. 
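handleArrayTableCollectionLast above is what lets repeated [[table]] headers grow a slice, while a fixed-size Go array runs out of room and now reports the "cannot decode array table into ... at position" mismatch. A quick sketch of the growing case, using only the public Unmarshal API:

```go
package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml/v2"
)

type Config struct {
	// A slice target grows by one element per [[Server]] header.
	// A fixed-size array target, e.g. [1]struct{ ... }, would fail
	// with the position error above once it runs out of room.
	Server []struct {
		Host string
		Port int
	}
}

func main() {
	doc := []byte("[[Server]]\nHost = 'a'\nPort = 1\n\n[[Server]]\nHost = 'b'\nPort = 2\n")

	var c Config
	if err := toml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(len(c.Server), c.Server[1].Host) // 2 b
}
```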
-func (d *decoder) handleTable(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleTable(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { if v.Kind() == reflect.Slice { if v.Len() == 0 { - return reflect.Value{}, newDecodeError(key.Node().Data, "cannot store a table in a slice") + return reflect.Value{}, unstable.NewParserError(key.Node().Data, "cannot store a table in a slice") } elem := v.Index(v.Len() - 1) x, err := d.handleTable(key, elem) @@ -560,7 +560,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { var rv reflect.Value for d.nextExpr() { expr := d.expr() - if expr.Kind != ast.KeyValue { + if expr.Kind != unstable.KeyValue { // Stash the expression so that fromParser can just loop and use // the right handler. // We could just recurse ourselves here, but at least this gives a @@ -587,7 +587,7 @@ func (d *decoder) handleKeyValues(v reflect.Value) (reflect.Value, error) { } type ( - handlerFn func(key ast.Iterator, v reflect.Value) (reflect.Value, error) + handlerFn func(key unstable.Iterator, v reflect.Value) (reflect.Value, error) valueMakerFn func() reflect.Value ) @@ -599,11 +599,11 @@ func makeSliceInterface() reflect.Value { return reflect.MakeSlice(sliceInterfaceType, 0, 16) } -func (d *decoder) handleTablePart(key ast.Iterator, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleTablePart(key unstable.Iterator, v reflect.Value) (reflect.Value, error) { return d.handleKeyPart(key, v, d.handleTable, makeMapStringInterface) } -func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, error) { +func (d *decoder) tryTextUnmarshaler(node *unstable.Node, v reflect.Value) (bool, error) { // Special case for time, because we allow to unmarshal to it from // different kind of AST nodes. 
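tryTextUnmarshaler, which begins above, is the hook that lets any addressable encoding.TextUnmarshaler consume the raw node bytes before the kind switch runs (with time.Time special-cased first). A sketch with a hypothetical Level type, illustrative rather than part of the library:

```go
package main

import (
	"fmt"
	"strings"

	toml "github.com/pelletier/go-toml/v2"
)

// Level is a hypothetical type; because *Level implements
// encoding.TextUnmarshaler, the decoder hands it the raw node
// bytes instead of applying the usual kind-based rules.
type Level int

func (l *Level) UnmarshalText(b []byte) error {
	switch strings.ToLower(string(b)) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", b)
	}
	return nil
}

type Config struct {
	Level Level
}

func main() {
	var c Config
	if err := toml.Unmarshal([]byte("Level = 'info'\n"), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Level) // 1
}
```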
if v.Type() == timeType { @@ -613,7 +613,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err if v.CanAddr() && v.Addr().Type().Implements(textUnmarshalerType) { err := v.Addr().Interface().(encoding.TextUnmarshaler).UnmarshalText(node.Data) if err != nil { - return false, newDecodeError(d.p.Raw(node.Raw), "%w", err) + return false, unstable.NewParserError(d.p.Raw(node.Raw), "%w", err) } return true, nil @@ -622,7 +622,7 @@ func (d *decoder) tryTextUnmarshaler(node *ast.Node, v reflect.Value) (bool, err return false, nil } -func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error { +func (d *decoder) handleValue(value *unstable.Node, v reflect.Value) error { for v.Kind() == reflect.Ptr { v = initAndDereferencePointer(v) } @@ -633,32 +633,32 @@ func (d *decoder) handleValue(value *ast.Node, v reflect.Value) error { } switch value.Kind { - case ast.String: + case unstable.String: return d.unmarshalString(value, v) - case ast.Integer: + case unstable.Integer: return d.unmarshalInteger(value, v) - case ast.Float: + case unstable.Float: return d.unmarshalFloat(value, v) - case ast.Bool: + case unstable.Bool: return d.unmarshalBool(value, v) - case ast.DateTime: + case unstable.DateTime: return d.unmarshalDateTime(value, v) - case ast.LocalDate: + case unstable.LocalDate: return d.unmarshalLocalDate(value, v) - case ast.LocalTime: + case unstable.LocalTime: return d.unmarshalLocalTime(value, v) - case ast.LocalDateTime: + case unstable.LocalDateTime: return d.unmarshalLocalDateTime(value, v) - case ast.InlineTable: + case unstable.InlineTable: return d.unmarshalInlineTable(value, v) - case ast.Array: + case unstable.Array: return d.unmarshalArray(value, v) default: panic(fmt.Errorf("handleValue not implemented for %s", value.Kind)) } } -func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalArray(array *unstable.Node, v reflect.Value) error { switch v.Kind() { case reflect.Slice: if v.IsNil() { @@ -729,7 +729,7 @@ func (d *decoder) unmarshalArray(array *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalInlineTable(itable *unstable.Node, v reflect.Value) error { // Make sure v is an initialized object. 
switch v.Kind() { case reflect.Map: @@ -746,7 +746,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error } return d.unmarshalInlineTable(itable, elem) default: - return newDecodeError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) + return unstable.NewParserError(itable.Data, "cannot store inline table in Go type %s", v.Kind()) } it := itable.Children() @@ -765,7 +765,7 @@ func (d *decoder) unmarshalInlineTable(itable *ast.Node, v reflect.Value) error return nil } -func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalDateTime(value *unstable.Node, v reflect.Value) error { dt, err := parseDateTime(value.Data) if err != nil { return err @@ -775,7 +775,7 @@ func (d *decoder) unmarshalDateTime(value *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalLocalDate(value *unstable.Node, v reflect.Value) error { ld, err := parseLocalDate(value.Data) if err != nil { return err @@ -792,28 +792,28 @@ func (d *decoder) unmarshalLocalDate(value *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalLocalTime(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalLocalTime(value *unstable.Node, v reflect.Value) error { lt, rest, err := parseLocalTime(value.Data) if err != nil { return err } if len(rest) > 0 { - return newDecodeError(rest, "extra characters at the end of a local time") + return unstable.NewParserError(rest, "extra characters at the end of a local time") } v.Set(reflect.ValueOf(lt)) return nil } -func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalLocalDateTime(value *unstable.Node, v reflect.Value) error { ldt, rest, err := parseLocalDateTime(value.Data) if err != nil { return err } if len(rest) > 0 { - return newDecodeError(rest, "extra characters at the end of a local date time") + return unstable.NewParserError(rest, "extra characters at the end of a local date time") } if v.Type() == timeType { @@ -828,7 +828,7 @@ func (d *decoder) unmarshalLocalDateTime(value *ast.Node, v reflect.Value) error return nil } -func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalBool(value *unstable.Node, v reflect.Value) error { b := value.Data[0] == 't' switch v.Kind() { @@ -837,13 +837,13 @@ func (d *decoder) unmarshalBool(value *ast.Node, v reflect.Value) error { case reflect.Interface: v.Set(reflect.ValueOf(b)) default: - return newDecodeError(value.Data, "cannot assign boolean to a %t", b) + return unstable.NewParserError(value.Data, "cannot assign boolean to a %t", b) } return nil } -func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalFloat(value *unstable.Node, v reflect.Value) error { f, err := parseFloat(value.Data) if err != nil { return err @@ -854,13 +854,13 @@ func (d *decoder) unmarshalFloat(value *ast.Node, v reflect.Value) error { v.SetFloat(f) case reflect.Float32: if f > math.MaxFloat32 { - return newDecodeError(value.Data, "number %f does not fit in a float32", f) + return unstable.NewParserError(value.Data, "number %f does not fit in a float32", f) } v.SetFloat(f) case reflect.Interface: v.Set(reflect.ValueOf(f)) default: - return newDecodeError(value.Data, "float cannot be assigned to %s", v.Kind()) + return unstable.NewParserError(value.Data, "float cannot be assigned to %s", v.Kind()) 
} return nil @@ -886,7 +886,7 @@ func init() { } } -func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalInteger(value *unstable.Node, v reflect.Value) error { i, err := parseInteger(value.Data) if err != nil { return err @@ -967,20 +967,20 @@ func (d *decoder) unmarshalInteger(value *ast.Node, v reflect.Value) error { return nil } -func (d *decoder) unmarshalString(value *ast.Node, v reflect.Value) error { +func (d *decoder) unmarshalString(value *unstable.Node, v reflect.Value) error { switch v.Kind() { case reflect.String: v.SetString(string(value.Data)) case reflect.Interface: v.Set(reflect.ValueOf(string(value.Data))) default: - return newDecodeError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) + return unstable.NewParserError(d.p.Raw(value.Raw), "cannot store TOML string into a Go %s", v.Kind()) } return nil } -func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleKeyValue(expr *unstable.Node, v reflect.Value) (reflect.Value, error) { d.strict.EnterKeyValue(expr) v, err := d.handleKeyValueInner(expr.Key(), expr.Value(), v) @@ -994,7 +994,7 @@ func (d *decoder) handleKeyValue(expr *ast.Node, v reflect.Value) (reflect.Value return v, err } -func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleKeyValueInner(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { if key.Next() { // Still scoping the key return d.handleKeyValuePart(key, value, v) @@ -1004,7 +1004,7 @@ func (d *decoder) handleKeyValueInner(key ast.Iterator, value *ast.Node, v refle return reflect.Value{}, d.handleValue(value, v) } -func (d *decoder) handleKeyValuePart(key ast.Iterator, value *ast.Node, v reflect.Value) (reflect.Value, error) { +func (d *decoder) handleKeyValuePart(key unstable.Iterator, value *unstable.Node, v reflect.Value) (reflect.Value, error) { // contains the replacement for v var rv reflect.Value diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go similarity index 60% rename from test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go rename to vendor/github.com/pelletier/go-toml/v2/unstable/ast.go index 9dec2e0007..b60d9bfd6d 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/internal/ast/ast.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/ast.go @@ -1,4 +1,4 @@ -package ast +package unstable import ( "fmt" @@ -7,13 +7,16 @@ import ( "github.com/pelletier/go-toml/v2/internal/danger" ) -// Iterator starts uninitialized, you need to call Next() first. +// Iterator over a sequence of nodes. +// +// Starts uninitialized, you need to call Next() first. // // For example: // // it := n.Children() // for it.Next() { -// it.Node() +// n := it.Node() +// // do something with n // } type Iterator struct { started bool @@ -32,42 +35,31 @@ func (c *Iterator) Next() bool { } // IsLast returns true if the current node of the iterator is the last -// one. Subsequent call to Next() will return false. +// one. Subsequent calls to Next() will return false. func (c *Iterator) IsLast() bool { return c.node.next == 0 } -// Node returns a copy of the node pointed at by the iterator. +// Node returns a pointer to the node pointed at by the iterator. func (c *Iterator) Node() *Node { return c.node } -// Root contains a full AST. 
+// Node in a TOML expression AST.
 //
-// It is immutable once constructed with Builder.
-type Root struct {
-	nodes []Node
-}
-
-// Iterator over the top level nodes.
-func (r *Root) Iterator() Iterator {
-	it := Iterator{}
-	if len(r.nodes) > 0 {
-		it.node = &r.nodes[0]
-	}
-	return it
-}
-
-func (r *Root) at(idx Reference) *Node {
-	return &r.nodes[idx]
-}
-
-// Arrays have one child per element in the array. InlineTables have
-// one child per key-value pair in the table. KeyValues have at least
-// two children. The first one is the value. The rest make a
-// potentially dotted key. Table and Array table have one child per
-// element of the key they represent (same as KeyValue, but without
-// the last node being the value).
+// Depending on Kind, its sequence of children should be interpreted
+// differently.
+//
+// - Array has one child per element in the array.
+// - InlineTable has one child per key-value pair in the table (each of kind
+//   KeyValue).
+// - KeyValue has at least two children. The first one is the value. The rest
+//   make a potentially dotted key.
+// - Table and ArrayTable's children represent a dotted key (same as
+//   KeyValue, but without the first node being the value).
+//
+// When relevant, Raw describes the range of bytes this node is referring to in
+// the input document. Use Parser.Raw() to retrieve the actual bytes.
 type Node struct {
 	Kind Kind
 	Raw  Range // Raw bytes from the input.
@@ -80,13 +72,13 @@ type Node struct {
 	child int // 0 if no child
 }
 
+// Range of bytes in the document.
 type Range struct {
 	Offset uint32
 	Length uint32
 }
 
-// Next returns a copy of the next node, or an invalid Node if there
-// is no next node.
+// Next returns a pointer to the next node, or nil if there is no next node.
 func (n *Node) Next() *Node {
 	if n.next == 0 {
 		return nil
@@ -96,9 +88,9 @@ func (n *Node) Next() *Node {
 	return (*Node)(danger.Stride(ptr, size, n.next))
 }
 
-// Child returns a copy of the first child node of this node. Other
-// children can be accessed calling Next on the first child. Returns
-// an invalid Node if there is none.
+// Child returns a pointer to the first child node of this node. Other children
+// can be accessed calling Next on the first child. Returns nil if this Node
+// has no child.
 func (n *Node) Child() *Node {
 	if n.child == 0 {
 		return nil
@@ -113,9 +105,9 @@ func (n *Node) Valid() bool {
 	return n != nil
 }
 
-// Key returns the child nodes making the Key on a supported
-// node. Panics otherwise. They are guaranteed to be all be of the
-// Kind Key. A simple key would return just one element.
+// Key returns the child nodes making the Key on a supported node. Panics
+// otherwise. They are guaranteed to all be of the Kind Key. A simple key
+// would return just one element.
 func (n *Node) Key() Iterator {
 	switch n.Kind {
 	case KeyValue:
diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
new file mode 100644
index 0000000000..9538e30df9
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/v2/unstable/builder.go
@@ -0,0 +1,71 @@
+package unstable
+
+// root contains a full AST.
+//
+// It is immutable once constructed with Builder.
+type root struct {
+	nodes []Node
+}
+
+// Iterator over the top level nodes.
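The linked-list layout described above means child traversal never allocates. A minimal sketch of walking a node's children (assuming n is a *Node produced by the Parser introduced later in this patch):

	for c := n.Child(); c != nil; c = c.Next() {
		fmt.Println(c.Kind, string(c.Data))
	}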
+func (r *root) Iterator() Iterator { + it := Iterator{} + if len(r.nodes) > 0 { + it.node = &r.nodes[0] + } + return it +} + +func (r *root) at(idx reference) *Node { + return &r.nodes[idx] +} + +type reference int + +const invalidReference reference = -1 + +func (r reference) Valid() bool { + return r != invalidReference +} + +type builder struct { + tree root + lastIdx int +} + +func (b *builder) Tree() *root { + return &b.tree +} + +func (b *builder) NodeAt(ref reference) *Node { + return b.tree.at(ref) +} + +func (b *builder) Reset() { + b.tree.nodes = b.tree.nodes[:0] + b.lastIdx = 0 +} + +func (b *builder) Push(n Node) reference { + b.lastIdx = len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + return reference(b.lastIdx) +} + +func (b *builder) PushAndChain(n Node) reference { + newIdx := len(b.tree.nodes) + b.tree.nodes = append(b.tree.nodes, n) + if b.lastIdx >= 0 { + b.tree.nodes[b.lastIdx].next = newIdx - b.lastIdx + } + b.lastIdx = newIdx + return reference(b.lastIdx) +} + +func (b *builder) AttachChild(parent reference, child reference) { + b.tree.nodes[parent].child = int(child) - int(parent) +} + +func (b *builder) Chain(from reference, to reference) { + b.tree.nodes[from].next = int(to) - int(from) +} diff --git a/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go new file mode 100644 index 0000000000..7ff26c53c7 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/doc.go @@ -0,0 +1,3 @@ +// Package unstable provides APIs that do not meet the backward compatibility +// guarantees yet. +package unstable diff --git a/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go similarity index 81% rename from vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go rename to vendor/github.com/pelletier/go-toml/v2/unstable/kind.go index 2b50c67fce..ff9df1bef8 100644 --- a/vendor/github.com/pelletier/go-toml/v2/internal/ast/kind.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/kind.go @@ -1,25 +1,26 @@ -package ast +package unstable import "fmt" +// Kind represents the type of TOML structure contained in a given Node. type Kind int const ( - // meta + // Meta Invalid Kind = iota Comment Key - // top level structures + // Top level structures Table ArrayTable KeyValue - // containers values + // Containers values Array InlineTable - // values + // Values String Bool Float @@ -30,6 +31,7 @@ const ( DateTime ) +// String implementation of fmt.Stringer. func (k Kind) String() string { switch k { case Invalid: diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/parser.go b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go similarity index 70% rename from test/performance/vendor/github.com/pelletier/go-toml/v2/parser.go rename to vendor/github.com/pelletier/go-toml/v2/unstable/parser.go index 9859a795bd..52db88e7a5 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/parser.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/parser.go @@ -1,50 +1,108 @@ -package toml +package unstable import ( "bytes" + "fmt" "unicode" - "github.com/pelletier/go-toml/v2/internal/ast" + "github.com/pelletier/go-toml/v2/internal/characters" "github.com/pelletier/go-toml/v2/internal/danger" ) -type parser struct { - builder ast.Builder - ref ast.Reference +// ParserError describes an error relative to the content of the document. 
+//
+// It cannot outlive the instance of Parser it refers to, and may cause panics
+// if the parser is reset.
+type ParserError struct {
+	Highlight []byte
+	Message   string
+	Key       []string // optional
+}
+
+// Error is the implementation of the error interface.
+func (e *ParserError) Error() string {
+	return e.Message
+}
+
+// NewParserError is a convenience function to create a ParserError.
+//
+// Warning: Highlight needs to be a subslice of Parser.data, so only slices
+// returned by Parser.Raw are valid candidates.
+func NewParserError(highlight []byte, format string, args ...interface{}) error {
+	return &ParserError{
+		Highlight: highlight,
+		Message:   fmt.Errorf(format, args...).Error(),
+	}
+}
+
+// Parser scans over a TOML-encoded document and generates an iterative AST.
+//
+// To prime the Parser, first reset it with the contents of a TOML document.
+// Then, process all top-level expressions sequentially. See Example.
+//
+// Don't forget to check Error() after you're done parsing.
+//
+// Each top-level expression needs to be fully processed before calling
+// NextExpression() again. Otherwise, calls to various Node methods may panic if
+// the parser has moved on to the next expression.
+//
+// For performance reasons, go-toml doesn't make a copy of the input bytes to
+// the parser. Make sure to copy all the bytes you need to outlive the slice
+// given to the parser.
+//
+// The parser doesn't provide nodes for comments yet, nor for whitespace.
+type Parser struct {
 	data    []byte
+	builder builder
+	ref     reference
 	left    []byte
 	err     error
 	first   bool
 }
 
-func (p *parser) Range(b []byte) ast.Range {
-	return ast.Range{
+// Data returns the slice provided to the last call to Reset.
+func (p *Parser) Data() []byte {
+	return p.data
+}
+
+// Range returns a range description that corresponds to a given slice of the
+// input. If the argument is not a subslice of the parser input, this function
+// panics.
+func (p *Parser) Range(b []byte) Range {
+	return Range{
 		Offset: uint32(danger.SubsliceOffset(p.data, b)),
 		Length: uint32(len(b)),
 	}
 }
 
-func (p *parser) Raw(raw ast.Range) []byte {
+// Raw returns the slice corresponding to the bytes in the given range.
+func (p *Parser) Raw(raw Range) []byte {
 	return p.data[raw.Offset : raw.Offset+raw.Length]
 }
 
-func (p *parser) Reset(b []byte) {
+// Reset brings the parser to its initial state for a given input. It wipes and
+// reuses internal storage to reduce allocation.
+func (p *Parser) Reset(b []byte) {
 	p.builder.Reset()
-	p.ref = ast.InvalidReference
+	p.ref = invalidReference
 	p.data = b
 	p.left = b
 	p.err = nil
 	p.first = true
 }
 
-//nolint:cyclop
-func (p *parser) NextExpression() bool {
+// NextExpression parses the next top-level expression. If an expression was
+// successfully parsed, it returns true. If the parser is at the end of the
+// document or an error occurred, it returns false.
+//
+// Retrieve the parsed expression with Expression().
+func (p *Parser) NextExpression() bool {
 	if len(p.left) == 0 || p.err != nil {
 		return false
 	}
 
 	p.builder.Reset()
-	p.ref = ast.InvalidReference
+	p.ref = invalidReference
 
 	for {
 		if len(p.left) == 0 || p.err != nil {
@@ -73,15 +131,18 @@
 	}
 }
 
-func (p *parser) Expression() *ast.Node {
+// Expression returns a pointer to the node representing the last successfully
+// parsed expression.
+func (p *Parser) Expression() *Node {
 	return p.builder.NodeAt(p.ref)
 }
 
-func (p *parser) Error() error {
+// Error returns any error that has occurred during parsing.
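The Parser doc above refers to an example; a minimal driving loop under this same API looks like the following sketch (runnable once this vendored version of go-toml is in place; the input document is made up):

	package main

	import (
		"fmt"

		"github.com/pelletier/go-toml/v2/unstable"
	)

	func main() {
		p := unstable.Parser{}
		p.Reset([]byte("a = 1\n[table]\nb = 2"))
		for p.NextExpression() {
			e := p.Expression()
			fmt.Println(e.Kind) // KeyValue, Table, KeyValue
		}
		if err := p.Error(); err != nil {
			fmt.Println(err) // may be an *unstable.ParserError pointing into the input
		}
	}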
+func (p *Parser) Error() error { return p.err } -func (p *parser) parseNewline(b []byte) ([]byte, error) { +func (p *Parser) parseNewline(b []byte) ([]byte, error) { if b[0] == '\n' { return b[1:], nil } @@ -91,14 +152,14 @@ func (p *parser) parseNewline(b []byte) ([]byte, error) { return rest, err } - return nil, newDecodeError(b[0:1], "expected newline but got %#U", b[0]) + return nil, NewParserError(b[0:1], "expected newline but got %#U", b[0]) } -func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseExpression(b []byte) (reference, []byte, error) { // expression = ws [ comment ] // expression =/ ws keyval ws [ comment ] // expression =/ ws table ws [ comment ] - ref := ast.InvalidReference + ref := invalidReference b = p.parseWhitespace(b) @@ -136,7 +197,7 @@ func (p *parser) parseExpression(b []byte) (ast.Reference, []byte, error) { return ref, b, nil } -func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseTable(b []byte) (reference, []byte, error) { // table = std-table / array-table if len(b) > 1 && b[1] == '[' { return p.parseArrayTable(b) @@ -145,12 +206,12 @@ func (p *parser) parseTable(b []byte) (ast.Reference, []byte, error) { return p.parseStdTable(b) } -func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseArrayTable(b []byte) (reference, []byte, error) { // array-table = array-table-open key array-table-close // array-table-open = %x5B.5B ws ; [[ Double left square bracket // array-table-close = ws %x5D.5D ; ]] Double right square bracket - ref := p.builder.Push(ast.Node{ - Kind: ast.ArrayTable, + ref := p.builder.Push(Node{ + Kind: ArrayTable, }) b = b[2:] @@ -174,12 +235,12 @@ func (p *parser) parseArrayTable(b []byte) (ast.Reference, []byte, error) { return ref, b, err } -func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseStdTable(b []byte) (reference, []byte, error) { // std-table = std-table-open key std-table-close // std-table-open = %x5B ws ; [ Left square bracket // std-table-close = ws %x5D ; ] Right square bracket - ref := p.builder.Push(ast.Node{ - Kind: ast.Table, + ref := p.builder.Push(Node{ + Kind: Table, }) b = b[1:] @@ -199,15 +260,15 @@ func (p *parser) parseStdTable(b []byte) (ast.Reference, []byte, error) { return ref, b, err } -func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseKeyval(b []byte) (reference, []byte, error) { // keyval = key keyval-sep val - ref := p.builder.Push(ast.Node{ - Kind: ast.KeyValue, + ref := p.builder.Push(Node{ + Kind: KeyValue, }) key, b, err := p.parseKey(b) if err != nil { - return ast.InvalidReference, nil, err + return invalidReference, nil, err } // keyval-sep = ws %x3D ws ; = @@ -215,12 +276,12 @@ func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { b = p.parseWhitespace(b) if len(b) == 0 { - return ast.InvalidReference, nil, newDecodeError(b, "expected = after a key, but the document ends there") + return invalidReference, nil, NewParserError(b, "expected = after a key, but the document ends there") } b, err = expect('=', b) if err != nil { - return ast.InvalidReference, nil, err + return invalidReference, nil, err } b = p.parseWhitespace(b) @@ -237,12 +298,12 @@ func (p *parser) parseKeyval(b []byte) (ast.Reference, []byte, error) { } //nolint:cyclop,funlen -func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseVal(b []byte) (reference, []byte, 
error) { // val = string / boolean / array / inline-table / date-time / float / integer - ref := ast.InvalidReference + ref := invalidReference if len(b) == 0 { - return ref, nil, newDecodeError(b, "expected value, not eof") + return ref, nil, NewParserError(b, "expected value, not eof") } var err error @@ -259,8 +320,8 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { } if err == nil { - ref = p.builder.Push(ast.Node{ - Kind: ast.String, + ref = p.builder.Push(Node{ + Kind: String, Raw: p.Range(raw), Data: v, }) @@ -277,8 +338,8 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { } if err == nil { - ref = p.builder.Push(ast.Node{ - Kind: ast.String, + ref = p.builder.Push(Node{ + Kind: String, Raw: p.Range(raw), Data: v, }) @@ -287,22 +348,22 @@ func (p *parser) parseVal(b []byte) (ast.Reference, []byte, error) { return ref, b, err case 't': if !scanFollowsTrue(b) { - return ref, nil, newDecodeError(atmost(b, 4), "expected 'true'") + return ref, nil, NewParserError(atmost(b, 4), "expected 'true'") } - ref = p.builder.Push(ast.Node{ - Kind: ast.Bool, + ref = p.builder.Push(Node{ + Kind: Bool, Data: b[:4], }) return ref, b[4:], nil case 'f': if !scanFollowsFalse(b) { - return ref, nil, newDecodeError(atmost(b, 5), "expected 'false'") + return ref, nil, NewParserError(atmost(b, 5), "expected 'false'") } - ref = p.builder.Push(ast.Node{ - Kind: ast.Bool, + ref = p.builder.Push(Node{ + Kind: Bool, Data: b[:5], }) @@ -324,7 +385,7 @@ func atmost(b []byte, n int) []byte { return b[:n] } -func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { v, rest, err := scanLiteralString(b) if err != nil { return nil, nil, nil, err @@ -333,19 +394,19 @@ func (p *parser) parseLiteralString(b []byte) ([]byte, []byte, []byte, error) { return v, v[1 : len(v)-1], rest, nil } -func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseInlineTable(b []byte) (reference, []byte, error) { // inline-table = inline-table-open [ inline-table-keyvals ] inline-table-close // inline-table-open = %x7B ws ; { // inline-table-close = ws %x7D ; } // inline-table-sep = ws %x2C ws ; , Comma // inline-table-keyvals = keyval [ inline-table-sep inline-table-keyvals ] - parent := p.builder.Push(ast.Node{ - Kind: ast.InlineTable, + parent := p.builder.Push(Node{ + Kind: InlineTable, }) first := true - var child ast.Reference + var child reference b = b[1:] @@ -356,7 +417,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { b = p.parseWhitespace(b) if len(b) == 0 { - return parent, nil, newDecodeError(previousB[:1], "inline table is incomplete") + return parent, nil, NewParserError(previousB[:1], "inline table is incomplete") } if b[0] == '}' { @@ -371,7 +432,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { b = p.parseWhitespace(b) } - var kv ast.Reference + var kv reference kv, b, err = p.parseKeyval(b) if err != nil { @@ -394,7 +455,7 @@ func (p *parser) parseInlineTable(b []byte) (ast.Reference, []byte, error) { } //nolint:funlen,cyclop -func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseValArray(b []byte) (reference, []byte, error) { // array = array-open [ array-values ] ws-comment-newline array-close // array-open = %x5B ; [ // array-close = %x5D ; ] @@ -405,13 +466,13 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, 
error) { arrayStart := b b = b[1:] - parent := p.builder.Push(ast.Node{ - Kind: ast.Array, + parent := p.builder.Push(Node{ + Kind: Array, }) first := true - var lastChild ast.Reference + var lastChild reference var err error for len(b) > 0 { @@ -421,7 +482,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { } if len(b) == 0 { - return parent, nil, newDecodeError(arrayStart[:1], "array is incomplete") + return parent, nil, NewParserError(arrayStart[:1], "array is incomplete") } if b[0] == ']' { @@ -430,7 +491,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { if b[0] == ',' { if first { - return parent, nil, newDecodeError(b[0:1], "array cannot start with comma") + return parent, nil, NewParserError(b[0:1], "array cannot start with comma") } b = b[1:] @@ -439,7 +500,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { return parent, nil, err } } else if !first { - return parent, nil, newDecodeError(b[0:1], "array elements must be separated by commas") + return parent, nil, NewParserError(b[0:1], "array elements must be separated by commas") } // TOML allows trailing commas in arrays. @@ -447,7 +508,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { break } - var valueRef ast.Reference + var valueRef reference valueRef, b, err = p.parseVal(b) if err != nil { return parent, nil, err @@ -472,7 +533,7 @@ func (p *parser) parseValArray(b []byte) (ast.Reference, []byte, error) { return parent, rest, err } -func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) { +func (p *Parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) { for len(b) > 0 { var err error b = p.parseWhitespace(b) @@ -501,7 +562,7 @@ func (p *parser) parseOptionalWhitespaceCommentNewline(b []byte) ([]byte, error) return b, nil } -func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, error) { token, rest, err := scanMultilineLiteralString(b) if err != nil { return nil, nil, nil, err @@ -520,7 +581,7 @@ func (p *parser) parseMultilineLiteralString(b []byte) ([]byte, []byte, []byte, } //nolint:funlen,gocognit,cyclop -func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, error) { // ml-basic-string = ml-basic-string-delim [ newline ] ml-basic-body // ml-basic-string-delim // ml-basic-string-delim = 3quotation-mark @@ -551,11 +612,11 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er if !escaped { str := token[startIdx:endIdx] - verr := utf8TomlValidAlreadyEscaped(str) + verr := characters.Utf8TomlValidAlreadyEscaped(str) if verr.Zero() { return token, str, rest, nil } - return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") } var builder bytes.Buffer @@ -635,13 +696,13 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er builder.WriteRune(x) i += 8 default: - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) } i++ } else { - size := utf8ValidNext(token[i:]) + size := characters.Utf8ValidNext(token[i:]) if size == 0 { - return nil, nil, nil, 
newDecodeError(token[i:i+1], "invalid character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) } builder.Write(token[i : i+size]) i += size @@ -651,7 +712,7 @@ func (p *parser) parseMultilineBasicString(b []byte) ([]byte, []byte, []byte, er return token, builder.Bytes(), rest, nil } -func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseKey(b []byte) (reference, []byte, error) { // key = simple-key / dotted-key // simple-key = quoted-key / unquoted-key // @@ -662,11 +723,11 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { // dot-sep = ws %x2E ws ; . Period raw, key, b, err := p.parseSimpleKey(b) if err != nil { - return ast.InvalidReference, nil, err + return invalidReference, nil, err } - ref := p.builder.Push(ast.Node{ - Kind: ast.Key, + ref := p.builder.Push(Node{ + Kind: Key, Raw: p.Range(raw), Data: key, }) @@ -681,8 +742,8 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { return ref, nil, err } - p.builder.PushAndChain(ast.Node{ - Kind: ast.Key, + p.builder.PushAndChain(Node{ + Kind: Key, Raw: p.Range(raw), Data: key, }) @@ -694,9 +755,9 @@ func (p *parser) parseKey(b []byte) (ast.Reference, []byte, error) { return ref, b, nil } -func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { +func (p *Parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { if len(b) == 0 { - return nil, nil, nil, newDecodeError(b, "expected key but found none") + return nil, nil, nil, NewParserError(b, "expected key but found none") } // simple-key = quoted-key / unquoted-key @@ -711,12 +772,12 @@ func (p *parser) parseSimpleKey(b []byte) (raw, key, rest []byte, err error) { key, rest = scanUnquotedKey(b) return key, key, rest, nil default: - return nil, nil, nil, newDecodeError(b[0:1], "invalid character at start of key: %c", b[0]) + return nil, nil, nil, NewParserError(b[0:1], "invalid character at start of key: %c", b[0]) } } //nolint:funlen,cyclop -func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { +func (p *Parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { // basic-string = quotation-mark *basic-char quotation-mark // quotation-mark = %x22 ; " // basic-char = basic-unescaped / escaped @@ -744,11 +805,11 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { // validate the string and return a direct reference to the buffer. 
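The comment above describes the fast path: when no escape was seen, the scanner validates the bytes once and returns them without copying. A rough standalone equivalent of that validation pass using only the standard library (a sketch; the vendored characters package additionally applies TOML's stricter character rules on top of plain UTF-8 validity):

	for i := 0; i < len(str); {
		r, size := utf8.DecodeRune(str[i:])
		if r == utf8.RuneError && size == 1 {
			return fmt.Errorf("invalid UTF-8 at byte %d", i)
		}
		i += size
	}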
if !escaped { str := token[startIdx:endIdx] - verr := utf8TomlValidAlreadyEscaped(str) + verr := characters.Utf8TomlValidAlreadyEscaped(str) if verr.Zero() { return token, str, rest, nil } - return nil, nil, nil, newDecodeError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") + return nil, nil, nil, NewParserError(str[verr.Index:verr.Index+verr.Size], "invalid UTF-8") } i := startIdx @@ -795,13 +856,13 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { builder.WriteRune(x) i += 8 default: - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid escaped character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid escaped character %#U", c) } i++ } else { - size := utf8ValidNext(token[i:]) + size := characters.Utf8ValidNext(token[i:]) if size == 0 { - return nil, nil, nil, newDecodeError(token[i:i+1], "invalid character %#U", c) + return nil, nil, nil, NewParserError(token[i:i+1], "invalid character %#U", c) } builder.Write(token[i : i+size]) i += size @@ -813,7 +874,7 @@ func (p *parser) parseBasicString(b []byte) ([]byte, []byte, []byte, error) { func hexToRune(b []byte, length int) (rune, error) { if len(b) < length { - return -1, newDecodeError(b, "unicode point needs %d character, not %d", length, len(b)) + return -1, NewParserError(b, "unicode point needs %d character, not %d", length, len(b)) } b = b[:length] @@ -828,19 +889,19 @@ func hexToRune(b []byte, length int) (rune, error) { case 'A' <= c && c <= 'F': d = uint32(c - 'A' + 10) default: - return -1, newDecodeError(b[i:i+1], "non-hex character") + return -1, NewParserError(b[i:i+1], "non-hex character") } r = r*16 + d } if r > unicode.MaxRune || 0xD800 <= r && r < 0xE000 { - return -1, newDecodeError(b, "escape sequence is invalid Unicode code point") + return -1, NewParserError(b, "escape sequence is invalid Unicode code point") } return rune(r), nil } -func (p *parser) parseWhitespace(b []byte) []byte { +func (p *Parser) parseWhitespace(b []byte) []byte { // ws = *wschar // wschar = %x20 ; Space // wschar =/ %x09 ; Horizontal tab @@ -850,24 +911,24 @@ func (p *parser) parseWhitespace(b []byte) []byte { } //nolint:cyclop -func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) parseIntOrFloatOrDateTime(b []byte) (reference, []byte, error) { switch b[0] { case 'i': if !scanFollowsInf(b) { - return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'inf'") + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'inf'") } - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:3], }), b[3:], nil case 'n': if !scanFollowsNan(b) { - return ast.InvalidReference, nil, newDecodeError(atmost(b, 3), "expected 'nan'") + return invalidReference, nil, NewParserError(atmost(b, 3), "expected 'nan'") } - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:3], }), b[3:], nil case '+', '-': @@ -898,7 +959,7 @@ func (p *parser) parseIntOrFloatOrDateTime(b []byte) (ast.Reference, []byte, err return p.scanIntOrFloat(b) } -func (p *parser) scanDateTime(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) scanDateTime(b []byte) (reference, []byte, error) { // scans for contiguous characters in [0-9T:Z.+-], and up to one space if // followed by a digit. 
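For reference, the input shapes this scanner accepts map onto the Kind constants assigned further down (example inputs taken from the TOML specification):

	1979-05-27T07:32:00Z  -> DateTime
	1979-05-27T07:32:00   -> LocalDateTime
	1979-05-27            -> LocalDate
	07:32:00              -> LocalTime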
hasDate := false @@ -941,30 +1002,30 @@ byteLoop: } } - var kind ast.Kind + var kind Kind if hasTime { if hasDate { if hasTz { - kind = ast.DateTime + kind = DateTime } else { - kind = ast.LocalDateTime + kind = LocalDateTime } } else { - kind = ast.LocalTime + kind = LocalTime } } else { - kind = ast.LocalDate + kind = LocalDate } - return p.builder.Push(ast.Node{ + return p.builder.Push(Node{ Kind: kind, Data: b[:i], }), b[i:], nil } //nolint:funlen,gocognit,cyclop -func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { +func (p *Parser) scanIntOrFloat(b []byte) (reference, []byte, error) { i := 0 if len(b) > 2 && b[0] == '0' && b[1] != '.' && b[1] != 'e' && b[1] != 'E' { @@ -990,8 +1051,8 @@ func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { } } - return p.builder.Push(ast.Node{ - Kind: ast.Integer, + return p.builder.Push(Node{ + Kind: Integer, Data: b[:i], }), b[i:], nil } @@ -1013,40 +1074,40 @@ func (p *parser) scanIntOrFloat(b []byte) (ast.Reference, []byte, error) { if c == 'i' { if scanFollowsInf(b[i:]) { - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:i+3], }), b[i+3:], nil } - return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'i' while scanning for a number") + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'i' while scanning for a number") } if c == 'n' { if scanFollowsNan(b[i:]) { - return p.builder.Push(ast.Node{ - Kind: ast.Float, + return p.builder.Push(Node{ + Kind: Float, Data: b[:i+3], }), b[i+3:], nil } - return ast.InvalidReference, nil, newDecodeError(b[i:i+1], "unexpected character 'n' while scanning for a number") + return invalidReference, nil, NewParserError(b[i:i+1], "unexpected character 'n' while scanning for a number") } break } if i == 0 { - return ast.InvalidReference, b, newDecodeError(b, "incomplete number") + return invalidReference, b, NewParserError(b, "incomplete number") } - kind := ast.Integer + kind := Integer if isFloat { - kind = ast.Float + kind = Float } - return p.builder.Push(ast.Node{ + return p.builder.Push(Node{ Kind: kind, Data: b[:i], }), b[i:], nil @@ -1075,11 +1136,11 @@ func isValidBinaryRune(r byte) bool { func expect(x byte, b []byte) ([]byte, error) { if len(b) == 0 { - return nil, newDecodeError(b, "expected character %c but the document ended here", x) + return nil, NewParserError(b, "expected character %c but the document ended here", x) } if b[0] != x { - return nil, newDecodeError(b[0:1], "expected character %c", x) + return nil, NewParserError(b[0:1], "expected character %c", x) } return b[1:], nil diff --git a/test/performance/vendor/github.com/pelletier/go-toml/v2/scanner.go b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go similarity index 79% rename from test/performance/vendor/github.com/pelletier/go-toml/v2/scanner.go rename to vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go index bb445fab4c..af22ebbe93 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/v2/scanner.go +++ b/vendor/github.com/pelletier/go-toml/v2/unstable/scanner.go @@ -1,4 +1,6 @@ -package toml +package unstable + +import "github.com/pelletier/go-toml/v2/internal/characters" func scanFollows(b []byte, pattern string) bool { n := len(pattern) @@ -54,16 +56,16 @@ func scanLiteralString(b []byte) ([]byte, []byte, error) { case '\'': return b[:i+1], b[i+1:], nil case '\n', '\r': - return nil, nil, newDecodeError(b[i:i+1], "literal strings cannot have new 
lines") + return nil, nil, NewParserError(b[i:i+1], "literal strings cannot have new lines") } - size := utf8ValidNext(b[i:]) + size := characters.Utf8ValidNext(b[i:]) if size == 0 { - return nil, nil, newDecodeError(b[i:i+1], "invalid character") + return nil, nil, NewParserError(b[i:i+1], "invalid character") } i += size } - return nil, nil, newDecodeError(b[len(b):], "unterminated literal string") + return nil, nil, NewParserError(b[len(b):], "unterminated literal string") } func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { @@ -98,39 +100,39 @@ func scanMultilineLiteralString(b []byte) ([]byte, []byte, error) { i++ if i < len(b) && b[i] == '\'' { - return nil, nil, newDecodeError(b[i-3:i+1], "''' not allowed in multiline literal string") + return nil, nil, NewParserError(b[i-3:i+1], "''' not allowed in multiline literal string") } return b[:i], b[i:], nil } case '\r': if len(b) < i+2 { - return nil, nil, newDecodeError(b[len(b):], `need a \n after \r`) + return nil, nil, NewParserError(b[len(b):], `need a \n after \r`) } if b[i+1] != '\n' { - return nil, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + return nil, nil, NewParserError(b[i:i+2], `need a \n after \r`) } i += 2 // skip the \n continue } - size := utf8ValidNext(b[i:]) + size := characters.Utf8ValidNext(b[i:]) if size == 0 { - return nil, nil, newDecodeError(b[i:i+1], "invalid character") + return nil, nil, NewParserError(b[i:i+1], "invalid character") } i += size } - return nil, nil, newDecodeError(b[len(b):], `multiline literal string not terminated by '''`) + return nil, nil, NewParserError(b[len(b):], `multiline literal string not terminated by '''`) } func scanWindowsNewline(b []byte) ([]byte, []byte, error) { const lenCRLF = 2 if len(b) < lenCRLF { - return nil, nil, newDecodeError(b, "windows new line expected") + return nil, nil, NewParserError(b, "windows new line expected") } if b[1] != '\n' { - return nil, nil, newDecodeError(b, `windows new line should be \r\n`) + return nil, nil, NewParserError(b, `windows new line should be \r\n`) } return b[:lenCRLF], b[lenCRLF:], nil @@ -165,11 +167,11 @@ func scanComment(b []byte) ([]byte, []byte, error) { if i+1 < len(b) && b[i+1] == '\n' { return b[:i+1], b[i+1:], nil } - return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") } - size := utf8ValidNext(b[i:]) + size := characters.Utf8ValidNext(b[i:]) if size == 0 { - return nil, nil, newDecodeError(b[i:i+1], "invalid character in comment") + return nil, nil, NewParserError(b[i:i+1], "invalid character in comment") } i += size @@ -192,17 +194,17 @@ func scanBasicString(b []byte) ([]byte, bool, []byte, error) { case '"': return b[:i+1], escaped, b[i+1:], nil case '\n', '\r': - return nil, escaped, nil, newDecodeError(b[i:i+1], "basic strings cannot have new lines") + return nil, escaped, nil, NewParserError(b[i:i+1], "basic strings cannot have new lines") case '\\': if len(b) < i+2 { - return nil, escaped, nil, newDecodeError(b[i:i+1], "need a character after \\") + return nil, escaped, nil, NewParserError(b[i:i+1], "need a character after \\") } escaped = true i++ // skip the next character } } - return nil, escaped, nil, newDecodeError(b[len(b):], `basic string not terminated by "`) + return nil, escaped, nil, NewParserError(b[len(b):], `basic string not terminated by "`) } func scanMultilineBasicString(b []byte) ([]byte, bool, []byte, error) { @@ -243,27 +245,27 @@ func scanMultilineBasicString(b []byte) 
([]byte, bool, []byte, error) { i++ if i < len(b) && b[i] == '"' { - return nil, escaped, nil, newDecodeError(b[i-3:i+1], `""" not allowed in multiline basic string`) + return nil, escaped, nil, NewParserError(b[i-3:i+1], `""" not allowed in multiline basic string`) } return b[:i], escaped, b[i:], nil } case '\\': if len(b) < i+2 { - return nil, escaped, nil, newDecodeError(b[len(b):], "need a character after \\") + return nil, escaped, nil, NewParserError(b[len(b):], "need a character after \\") } escaped = true i++ // skip the next character case '\r': if len(b) < i+2 { - return nil, escaped, nil, newDecodeError(b[len(b):], `need a \n after \r`) + return nil, escaped, nil, NewParserError(b[len(b):], `need a \n after \r`) } if b[i+1] != '\n' { - return nil, escaped, nil, newDecodeError(b[i:i+2], `need a \n after \r`) + return nil, escaped, nil, NewParserError(b[i:i+2], `need a \n after \r`) } i++ // skip the \n } } - return nil, escaped, nil, newDecodeError(b[len(b):], `multiline basic string not terminated by """`) + return nil, escaped, nil, NewParserError(b[len(b):], `multiline basic string not terminated by """`) } diff --git a/vendor/github.com/rs/cors/cors.go b/vendor/github.com/rs/cors/cors.go index 2ce24e3f3a..a47b7df871 100644 --- a/vendor/github.com/rs/cors/cors.go +++ b/vendor/github.com/rs/cors/cors.go @@ -62,6 +62,9 @@ type Options struct { // AllowCredentials indicates whether the request can include user credentials like // cookies, HTTP authentication or client side SSL certificates. AllowCredentials bool + // AllowPrivateNetwork indicates whether to accept cross-origin requests over a + // private network. + AllowPrivateNetwork bool // OptionsPassthrough instructs preflight to let other potential next handlers to // process the OPTIONS method. Turn this on if your application handles OPTIONS. OptionsPassthrough bool @@ -103,6 +106,7 @@ type Cors struct { // Status code to use for successful OPTIONS requests optionsSuccessStatus int allowCredentials bool + allowPrivateNetwork bool optionPassthrough bool } @@ -113,6 +117,7 @@ func New(options Options) *Cors { allowOriginFunc: options.AllowOriginFunc, allowOriginRequestFunc: options.AllowOriginRequestFunc, allowCredentials: options.AllowCredentials, + allowPrivateNetwork: options.AllowPrivateNetwork, maxAge: options.MaxAge, optionPassthrough: options.OptionsPassthrough, } @@ -282,6 +287,9 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { headers.Add("Vary", "Origin") headers.Add("Vary", "Access-Control-Request-Method") headers.Add("Vary", "Access-Control-Request-Headers") + if c.allowPrivateNetwork { + headers.Add("Vary", "Access-Control-Request-Private-Network") + } if origin == "" { c.logf(" Preflight aborted: empty origin") @@ -319,6 +327,9 @@ func (c *Cors) handlePreflight(w http.ResponseWriter, r *http.Request) { if c.allowCredentials { headers.Set("Access-Control-Allow-Credentials", "true") } + if c.allowPrivateNetwork && r.Header.Get("Access-Control-Request-Private-Network") == "true" { + headers.Set("Access-Control-Allow-Private-Network", "true") + } if c.maxAge > 0 { headers.Set("Access-Control-Max-Age", strconv.Itoa(c.maxAge)) } diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS new file mode 100644 index 0000000000..2d7bb2bf57 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. 
+# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 0000000000..369e3d5519 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 0000000000..937942c2b2 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 0000000000..2a9f2dc3b9 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1352 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"html"
+	"math"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// Operation defines the operation of a diff item.
+type Operation int8
+
+//go:generate stringer -type=Operation -trimprefix=Diff
+
+const (
+	// DiffDelete item represents a delete diff.
+	DiffDelete Operation = -1
+	// DiffInsert item represents an insert diff.
+	DiffInsert Operation = 1
+	// DiffEqual item represents an equal diff.
+	DiffEqual Operation = 0
+	// IndexSeparator is used to separate the array indexes in an index string.
+	IndexSeparator = ","
+)
+
+// Diff represents one diff operation.
+type Diff struct {
+	Type Operation
+	Text string
+}
+
+// splice removes amount elements from slice at index index, replacing them with elements.
+func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff {
+	if len(elements) == amount {
+		// Easy case: overwrite the relevant items.
+		copy(slice[index:], elements)
+		return slice
+	}
+	if len(elements) < amount {
+		// Fewer new items than old.
+		// Copy in the new items.
+		copy(slice[index:], elements)
+		// Shift the remaining items left.
+		copy(slice[index+len(elements):], slice[index+amount:])
+		// Calculate the new end of the slice.
+		end := len(slice) - amount + len(elements)
+		// Zero stranded elements at end so that they can be garbage collected.
+		tail := slice[end:]
+		for i := range tail {
+			tail[i] = Diff{}
+		}
+		return slice[:end]
+	}
+	// More new items than old.
+	// Make room in slice for new elements.
+	// There's probably an even more efficient way to do this,
+	// but this is simple and clear.
+	need := len(slice) - amount + len(elements)
+	for len(slice) < need {
+		slice = append(slice, Diff{})
+	}
+	// Shift slice elements right to make room for new elements.
+	copy(slice[index+len(elements):], slice[index+amount:])
+	// Copy in new elements.
+	copy(slice[index:], elements)
+	return slice
+}
+
+// DiffMain finds the differences between two texts.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff {
+	return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines)
+}
+
+// DiffMainRunes finds the differences between two rune sequences.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff {
+	var deadline time.Time
+	if dmp.DiffTimeout > 0 {
+		deadline = time.Now().Add(dmp.DiffTimeout)
+	}
+	return dmp.diffMainRunes(text1, text2, checklines, deadline)
+}
+
+func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff {
+	if runesEqual(text1, text2) {
+		var diffs []Diff
+		if len(text1) > 0 {
+			diffs = append(diffs, Diff{DiffEqual, string(text1)})
+		}
+		return diffs
+	}
+	// Trim off common prefix (speedup).
+	commonlength := commonPrefixLength(text1, text2)
+	commonprefix := text1[:commonlength]
+	text1 = text1[commonlength:]
+	text2 = text2[commonlength:]
+
+	// Trim off common suffix (speedup).
+	commonlength = commonSuffixLength(text1, text2)
+	commonsuffix := text1[len(text1)-commonlength:]
+	text1 = text1[:len(text1)-commonlength]
+	text2 = text2[:len(text2)-commonlength]
+
+	// Compute the diff on the middle block.
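End to end, the exported entry points above are typically driven like this (a sketch; New(), defined elsewhere in this package, returns a DiffMatchPatch with default settings, and the input strings are made up):

	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain("The cat ate", "The hat sat", false)
	for _, d := range diffs {
		fmt.Println(d.Type, d.Text)
	}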
+ diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + {DiffDelete, string(text1)}, + {DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + diffs := diffsA + diffs = append(diffs, Diff{DiffEqual, string(midCommon)}) + diffs = append(diffs, diffsB...) + return diffs + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.DiffLinesToRunes(string(text1), string(text2)) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. 
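A note on the line-mode encoding used here: this fork represents each distinct line as its decimal index into lineArray followed by IndexSeparator, rather than as a single Unicode character (see DiffLinesToChars and DiffCharsToLines later in this file). Roughly, and assuming the usual empty-string sentinel at lineArray[0]:

	chars1, chars2, lines := dmp.DiffLinesToChars("alpha\nbeta\n", "alpha\ngamma\n")
	// chars1 == "1,2,", chars2 == "1,3,", lines[1] == "alpha\n"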
+	textDelete := ""
+	textInsert := ""
+
+	for pointer < len(diffs) {
+		switch diffs[pointer].Type {
+		case DiffInsert:
+			countInsert++
+			textInsert += diffs[pointer].Text
+		case DiffDelete:
+			countDelete++
+			textDelete += diffs[pointer].Text
+		case DiffEqual:
+			// Upon reaching an equality, check for prior redundancies.
+			if countDelete >= 1 && countInsert >= 1 {
+				// Delete the offending records and add the merged ones.
+				diffs = splice(diffs, pointer-countDelete-countInsert,
+					countDelete+countInsert)
+
+				pointer = pointer - countDelete - countInsert
+				a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline)
+				for j := len(a) - 1; j >= 0; j-- {
+					diffs = splice(diffs, pointer, 0, a[j])
+				}
+				pointer = pointer + len(a)
+			}
+
+			countInsert = 0
+			countDelete = 0
+			textDelete = ""
+			textInsert = ""
+		}
+		pointer++
+	}
+
+	return diffs[:len(diffs)-1] // Remove the dummy entry at the end.
+}
+
+// DiffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff.
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character.
+// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations.
+func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff {
+	// Unused in this code, but retained for interface compatibility.
+	return dmp.diffBisect([]rune(text1), []rune(text2), deadline)
+}
+
+// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff.
+// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations.
+func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff {
+	// Cache the text lengths to prevent multiple calls.
+	runes1Len, runes2Len := len(runes1), len(runes2)
+
+	maxD := (runes1Len + runes2Len + 1) / 2
+	vOffset := maxD
+	vLength := 2 * maxD
+
+	v1 := make([]int, vLength)
+	v2 := make([]int, vLength)
+	for i := range v1 {
+		v1[i] = -1
+		v2[i] = -1
+	}
+	v1[vOffset+1] = 0
+	v2[vOffset+1] = 0
+
+	delta := runes1Len - runes2Len
+	// If the total number of characters is odd, then the front path will collide with the reverse path.
+	front := (delta%2 != 0)
+	// Offsets for start and end of k loop. Prevents mapping of space beyond the grid.
+	k1start := 0
+	k1end := 0
+	k2start := 0
+	k2end := 0
+	for d := 0; d < maxD; d++ {
+		// Bail out if deadline is reached.
+		if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) {
+			break
+		}
+
+		// Walk the front path one step.
+		for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 {
+			k1Offset := vOffset + k1
+			var x1 int
+
+			if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) {
+				x1 = v1[k1Offset+1]
+			} else {
+				x1 = v1[k1Offset-1] + 1
+			}
+
+			y1 := x1 - k1
+			for x1 < runes1Len && y1 < runes2Len {
+				if runes1[x1] != runes2[y1] {
+					break
+				}
+				x1++
+				y1++
+			}
+			v1[k1Offset] = x1
+			if x1 > runes1Len {
+				// Ran off the right of the graph.
+				k1end += 2
+			} else if y1 > runes2Len {
+				// Ran off the bottom of the graph.
+				k1start += 2
+			} else if front {
+				k2Offset := vOffset + delta - k1
+				if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 {
+					// Mirror x2 onto top-left coordinate system.
+					x2 := runes1Len - v2[k2Offset]
+					if x1 >= x2 {
+						// Overlap detected.
+						return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
+					}
+				}
+			}
+		}
+		// Walk the reverse path one step.
+		for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 {
+			k2Offset := vOffset + k2
+			var x2 int
+			if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) {
+				x2 = v2[k2Offset+1]
+			} else {
+				x2 = v2[k2Offset-1] + 1
+			}
+			var y2 = x2 - k2
+			for x2 < runes1Len && y2 < runes2Len {
+				if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] {
+					break
+				}
+				x2++
+				y2++
+			}
+			v2[k2Offset] = x2
+			if x2 > runes1Len {
+				// Ran off the left of the graph.
+				k2end += 2
+			} else if y2 > runes2Len {
+				// Ran off the top of the graph.
+				k2start += 2
+			} else if !front {
+				k1Offset := vOffset + delta - k2
+				if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 {
+					x1 := v1[k1Offset]
+					y1 := vOffset + x1 - k1Offset
+					// Mirror x2 onto top-left coordinate system.
+					x2 = runes1Len - x2
+					if x1 >= x2 {
+						// Overlap detected.
+						return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline)
+					}
+				}
+			}
+		}
+	}
+	// Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all.
+	return []Diff{
+		{DiffDelete, string(runes1)},
+		{DiffInsert, string(runes2)},
+	}
+}
+
+func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int,
+	deadline time.Time) []Diff {
+	runes1a := runes1[:x]
+	runes2a := runes2[:y]
+	runes1b := runes1[x:]
+	runes2b := runes2[y:]
+
+	// Compute both diffs serially.
+	diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline)
+	diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline)
+
+	return append(diffs, diffsb...)
+}
+
+// DiffLinesToChars splits two texts into a list of strings, and reduces the texts to a string of hashes where each Unicode character represents one line.
+// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes.
+func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) {
+	chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2)
+	return chars1, chars2, lineArray
+}
+
+// DiffLinesToRunes splits two texts into a list of runes.
+func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) {
+	chars1, chars2, lineArray := dmp.diffLinesToStrings(text1, text2)
+	return []rune(chars1), []rune(chars2), lineArray
+}
+
+// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text.
+func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff {
+	hydrated := make([]Diff, 0, len(diffs))
+	for _, aDiff := range diffs {
+		chars := strings.Split(aDiff.Text, IndexSeparator)
+		text := make([]string, len(chars))
+
+		for i, r := range chars {
+			i1, err := strconv.Atoi(r)
+			if err == nil {
+				text[i] = lineArray[i1]
+			}
+		}
+
+		aDiff.Text = strings.Join(text, "")
+		hydrated = append(hydrated, aDiff)
+	}
+	return hydrated
+}
+
+// DiffCommonPrefix determines the common prefix length of two strings.
+func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int {
+	// Unused in this code, but retained for interface compatibility.
+	return commonPrefixLength([]rune(text1), []rune(text2))
+}
+
+// DiffCommonSuffix determines the common suffix length of two strings.
+func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int {
+	// Unused in this code, but retained for interface compatibility.
+	return commonSuffixLength([]rune(text1), []rune(text2))
+}
+
+// commonPrefixLength returns the length of the common prefix of two rune slices.
+func commonPrefixLength(text1, text2 []rune) int {
+	// Linear search. See comment in commonSuffixLength.
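Worked examples for the two helpers (the values match the classic diff-match-patch test cases):

	commonPrefixLength([]rune("1234abcdef"), []rune("1234xyz")) // 4
	commonSuffixLength([]rune("abcdef1234"), []rune("xyz1234")) // 4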
+ n := 0 + for ; n < len(text1) && n < len(text2); n++ { + if text1[n] != text2[n] { + return n + } + } + return n +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/. + // See discussion at https://github.com/sergi/go-diff/issues/54. + i1 := len(text1) + i2 := len(text2) + for n := 0; ; n++ { + i1-- + i2-- + if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] { + return n + } + } +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. 
+ if len(text1) > len(text2) {
+ return hm
+ }
+
+ return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]}
+}
+
+// diffHalfMatchI checks whether a substring of shorttext exists within longtext such that the substring is at least half the length of longtext.
+// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or nil if there was no match.
+func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune {
+ var bestCommonA []rune
+ var bestCommonB []rune
+ var bestCommonLen int
+ var bestLongtextA []rune
+ var bestLongtextB []rune
+ var bestShorttextA []rune
+ var bestShorttextB []rune
+
+ // Start with a 1/4 length substring at position i as a seed.
+ seed := l[i : i+len(l)/4]
+
+ for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) {
+ prefixLength := commonPrefixLength(l[i:], s[j:])
+ suffixLength := commonSuffixLength(l[:i], s[:j])
+
+ if bestCommonLen < suffixLength+prefixLength {
+ bestCommonA = s[j-suffixLength : j]
+ bestCommonB = s[j : j+prefixLength]
+ bestCommonLen = len(bestCommonA) + len(bestCommonB)
+ bestLongtextA = l[:i-suffixLength]
+ bestLongtextB = l[i+prefixLength:]
+ bestShorttextA = s[:j-suffixLength]
+ bestShorttextB = s[j+prefixLength:]
+ }
+ }
+
+ if bestCommonLen*2 < len(l) {
+ return nil
+ }
+
+ return [][]rune{
+ bestLongtextA,
+ bestLongtextB,
+ bestShorttextA,
+ bestShorttextB,
+ append(bestCommonA, bestCommonB...),
+ }
+}
+
+// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff {
+ changes := false
+ // Stack of indices where equalities are found.
+ equalities := make([]int, 0, len(diffs))
+
+ var lastequality string
+ // Always equal to diffs[equalities[equalitiesLength - 1]][1]
+ var pointer int // Index of current position.
+ // Number of characters that changed prior to the equality.
+ var lengthInsertions1, lengthDeletions1 int
+ // Number of characters that changed after the equality.
+ var lengthInsertions2, lengthDeletions2 int
+
+ for pointer < len(diffs) {
+ if diffs[pointer].Type == DiffEqual {
+ // Equality found.
+ equalities = append(equalities, pointer)
+ lengthInsertions1 = lengthInsertions2
+ lengthDeletions1 = lengthDeletions2
+ lengthInsertions2 = 0
+ lengthDeletions2 = 0
+ lastequality = diffs[pointer].Text
+ } else {
+ // An insertion or deletion.
+
+ if diffs[pointer].Type == DiffInsert {
+ lengthInsertions2 += utf8.RuneCountInString(diffs[pointer].Text)
+ } else {
+ lengthDeletions2 += utf8.RuneCountInString(diffs[pointer].Text)
+ }
+ // Eliminate an equality that is smaller or equal to the edits on both sides of it.
+ difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1)))
+ difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2)))
+ if utf8.RuneCountInString(lastequality) > 0 &&
+ (utf8.RuneCountInString(lastequality) <= difference1) &&
+ (utf8.RuneCountInString(lastequality) <= difference2) {
+ // Duplicate record.
+ insPoint := equalities[len(equalities)-1]
+ diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality})
+
+ // Change second copy to insert.
+ diffs[insPoint+1].Type = DiffInsert
+ // Throw away the equality we just deleted.
+ equalities = equalities[:len(equalities)-1]
+
+ if len(equalities) > 0 {
+ equalities = equalities[:len(equalities)-1]
+ }
+ pointer = -1
+ if len(equalities) > 0 {
+ pointer = equalities[len(equalities)-1]
+ }
+
+ lengthInsertions1 = 0 // Reset the counters.
+ lengthDeletions1 = 0
+ lengthInsertions2 = 0
+ lengthDeletions2 = 0
+ lastequality = ""
+ changes = true
+ }
+ }
+ pointer++
+ }
+
+ // Normalize the diff.
+ if changes {
+ diffs = dmp.DiffCleanupMerge(diffs)
+ }
+ diffs = dmp.DiffCleanupSemanticLossless(diffs)
+ // Find any overlaps between deletions and insertions.
+ // e.g: <del>abcxxx</del><ins>xxxdef</ins>
+ // -> <del>abc</del>xxx<ins>def</ins>
+ // e.g: <del>xxxabc</del><ins>defxxx</ins>
+ // -> <ins>def</ins>xxx<del>abc</del>
+ // Only extract an overlap if it is as big as the edit ahead or behind it.
+ pointer = 1
+ for pointer < len(diffs) {
+ if diffs[pointer-1].Type == DiffDelete &&
+ diffs[pointer].Type == DiffInsert {
+ deletion := diffs[pointer-1].Text
+ insertion := diffs[pointer].Text
+ overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion)
+ overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion)
+ if overlapLength1 >= overlapLength2 {
+ if float64(overlapLength1) >= float64(utf8.RuneCountInString(deletion))/2 ||
+ float64(overlapLength1) >= float64(utf8.RuneCountInString(insertion))/2 {
+
+ // Overlap found. Insert an equality and trim the surrounding edits.
+ diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]})
+ diffs[pointer-1].Text =
+ deletion[0 : len(deletion)-overlapLength1]
+ diffs[pointer+1].Text = insertion[overlapLength1:]
+ pointer++
+ }
+ } else {
+ if float64(overlapLength2) >= float64(utf8.RuneCountInString(deletion))/2 ||
+ float64(overlapLength2) >= float64(utf8.RuneCountInString(insertion))/2 {
+ // Reverse overlap found. Insert an equality and swap and trim the surrounding edits.
+ overlap := Diff{DiffEqual, deletion[:overlapLength2]}
+ diffs = splice(diffs, pointer, 0, overlap)
+ diffs[pointer-1].Type = DiffInsert
+ diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2]
+ diffs[pointer+1].Type = DiffDelete
+ diffs[pointer+1].Text = deletion[overlapLength2:]
+ pointer++
+ }
+ }
+ pointer++
+ }
+ pointer++
+ }
+
+ return diffs
+}
+
+// Define some regex patterns for matching boundaries.
+var (
+ nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`)
+ whitespaceRegex = regexp.MustCompile(`\s`)
+ linebreakRegex = regexp.MustCompile(`[\r\n]`)
+ blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`)
+ blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`)
+)
+
+// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries.
+// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables.
+func diffCleanupSemanticScore(one, two string) int {
+ if len(one) == 0 || len(two) == 0 {
+ // Edges are the best.
+ return 6
+ }
+
+ // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity.
+ rune1, _ := utf8.DecodeLastRuneInString(one)
+ rune2, _ := utf8.DecodeRuneInString(two)
+ char1 := string(rune1)
+ char2 := string(rune2)
+
+ nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1)
+ nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2)
+ whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1)
+ whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2)
+ lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1)
+ lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2)
+ blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one)
+ blankLine2 := lineBreak2 && blanklineStartRegex.MatchString(two)
+
+ if blankLine1 || blankLine2 {
+ // Five points for blank lines.
+ return 5
+ } else if lineBreak1 || lineBreak2 {
+ // Four points for line breaks.
+ return 4
+ } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 {
+ // Three points for end of sentences.
+ return 3
+ } else if whitespace1 || whitespace2 {
+ // Two points for whitespace.
+ return 2
+ } else if nonAlphaNumeric1 || nonAlphaNumeric2 {
+ // One point for non-alphanumeric.
+ return 1
+ }
+ return 0
+}
+
+// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary.
+// E.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
+func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff {
+ pointer := 1
+
+ // Intentionally ignore the first and last element (don't need checking).
+ for pointer < len(diffs)-1 {
+ if diffs[pointer-1].Type == DiffEqual &&
+ diffs[pointer+1].Type == DiffEqual {
+
+ // This is a single edit surrounded by equalities.
+ equality1 := diffs[pointer-1].Text
+ edit := diffs[pointer].Text
+ equality2 := diffs[pointer+1].Text
+
+ // First, shift the edit as far left as possible.
+ commonOffset := dmp.DiffCommonSuffix(equality1, edit)
+ if commonOffset > 0 {
+ commonString := edit[len(edit)-commonOffset:]
+ equality1 = equality1[0 : len(equality1)-commonOffset]
+ edit = commonString + edit[:len(edit)-commonOffset]
+ equality2 = commonString + equality2
+ }
+
+ // Second, step character by character right, looking for the best fit.
+ bestEquality1 := equality1
+ bestEdit := edit
+ bestEquality2 := equality2
+ bestScore := diffCleanupSemanticScore(equality1, edit) +
+ diffCleanupSemanticScore(edit, equality2)
+
+ for len(edit) != 0 && len(equality2) != 0 {
+ _, sz := utf8.DecodeRuneInString(edit)
+ if len(equality2) < sz || edit[:sz] != equality2[:sz] {
+ break
+ }
+ equality1 += edit[:sz]
+ edit = edit[sz:] + equality2[:sz]
+ equality2 = equality2[sz:]
+ score := diffCleanupSemanticScore(equality1, edit) +
+ diffCleanupSemanticScore(edit, equality2)
+ // The >= encourages trailing rather than leading whitespace on edits.
+ if score >= bestScore {
+ bestScore = score
+ bestEquality1 = equality1
+ bestEdit = edit
+ bestEquality2 = equality2
+ }
+ }
+
+ if diffs[pointer-1].Text != bestEquality1 {
+ // We have an improvement, save it back to the diff.
+ if len(bestEquality1) != 0 {
+ diffs[pointer-1].Text = bestEquality1
+ } else {
+ diffs = splice(diffs, pointer-1, 1)
+ pointer--
+ }
+
+ diffs[pointer].Text = bestEdit
+ if len(bestEquality2) != 0 {
+ diffs[pointer+1].Text = bestEquality2
+ } else {
+ diffs = append(diffs[:pointer+1], diffs[pointer+2:]...)
+ pointer--
+ }
+ }
+ }
+ pointer++
+ }
+
+ return diffs
+}
+
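A short sketch of how these cleanup passes are typically driven through the package's public API (the example texts are illustrative, borrowed from the classic upstream test case; the import path is the upstream one this vendor tree mirrors):

    package main

    import (
        "fmt"

        "github.com/sergi/go-diff/diffmatchpatch"
    )

    func main() {
        dmp := diffmatchpatch.New()
        // A character-level diff of these words is highly fragmented;
        // semantic cleanup collapses it into one delete and one insert.
        diffs := dmp.DiffMain("mouse", "sofas", false)
        diffs = dmp.DiffCleanupSemantic(diffs)
        fmt.Println(dmp.DiffPrettyText(diffs))
    }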
+// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities.
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff {
+ changes := false
+ // Stack of indices where equalities are found.
+ type equality struct {
+ data int
+ next *equality
+ }
+ var equalities *equality
+ // Always equal to equalities[equalitiesLength-1][1]
+ lastequality := ""
+ pointer := 0 // Index of current position.
+ // Is there an insertion operation before the last equality.
+ preIns := false
+ // Is there a deletion operation before the last equality.
+ preDel := false
+ // Is there an insertion operation after the last equality.
+ postIns := false
+ // Is there a deletion operation after the last equality.
+ postDel := false
+ for pointer < len(diffs) {
+ if diffs[pointer].Type == DiffEqual { // Equality found.
+ if len(diffs[pointer].Text) < dmp.DiffEditCost &&
+ (postIns || postDel) {
+ // Candidate found.
+ equalities = &equality{
+ data: pointer,
+ next: equalities,
+ }
+ preIns = postIns
+ preDel = postDel
+ lastequality = diffs[pointer].Text
+ } else {
+ // Not a candidate, and can never become one.
+ equalities = nil
+ lastequality = ""
+ }
+ postIns = false
+ postDel = false
+ } else { // An insertion or deletion.
+ if diffs[pointer].Type == DiffDelete {
+ postDel = true
+ } else {
+ postIns = true
+ }
+
+ // Five types to be split:
+ // <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
+ // <ins>A</ins>X<ins>C</ins><del>D</del>
+ // <ins>A</ins><del>B</del>X<ins>C</ins>
+ // <ins>A</del>X<ins>C</ins><del>D</del>
+ // <ins>A</ins><del>B</del>X<del>C</del>
+ var sumPres int
+ if preIns {
+ sumPres++
+ }
+ if preDel {
+ sumPres++
+ }
+ if postIns {
+ sumPres++
+ }
+ if postDel {
+ sumPres++
+ }
+ if len(lastequality) > 0 &&
+ ((preIns && preDel && postIns && postDel) ||
+ ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) {
+
+ insPoint := equalities.data
+
+ // Duplicate record.
+ diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality})
+
+ // Change second copy to insert.
+ diffs[insPoint+1].Type = DiffInsert
+ // Throw away the equality we just deleted.
+ equalities = equalities.next
+ lastequality = ""
+
+ if preIns && preDel {
+ // No changes made which could affect previous entry, keep going.
+ postIns = true
+ postDel = true
+ equalities = nil
+ } else {
+ if equalities != nil {
+ equalities = equalities.next
+ }
+ if equalities != nil {
+ pointer = equalities.data
+ } else {
+ pointer = -1
+ }
+ postIns = false
+ postDel = false
+ }
+ changes = true
+ }
+ }
+ pointer++
+ }
+
+ if changes {
+ diffs = dmp.DiffCleanupMerge(diffs)
+ }
+
+ return diffs
+}
+
+// DiffCleanupMerge reorders and merges like edit sections. Merge equalities.
+// Any edit section can move as long as it doesn't cross an equality.
+func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff {
+ // Add a dummy entry at the end.
+ diffs = append(diffs, Diff{DiffEqual, ""})
+ pointer := 0
+ countDelete := 0
+ countInsert := 0
+ commonlength := 0
+ textDelete := []rune(nil)
+ textInsert := []rune(nil)
+
+ for pointer < len(diffs) {
+ switch diffs[pointer].Type {
+ case DiffInsert:
+ countInsert++
+ textInsert = append(textInsert, []rune(diffs[pointer].Text)...)
+ pointer++
+ break
+ case DiffDelete:
+ countDelete++
+ textDelete = append(textDelete, []rune(diffs[pointer].Text)...)
+ pointer++
+ break
+ case DiffEqual:
+ // Upon reaching an equality, check for prior redundancies.
+ if countDelete+countInsert > 1 {
+ if countDelete != 0 && countInsert != 0 {
+ // Factor out any common prefixes.
+ commonlength = commonPrefixLength(textInsert, textDelete)
+ if commonlength != 0 {
+ x := pointer - countDelete - countInsert
+ if x > 0 && diffs[x-1].Type == DiffEqual {
+ diffs[x-1].Text += string(textInsert[:commonlength])
+ } else {
+ diffs = append([]Diff{{DiffEqual, string(textInsert[:commonlength])}}, diffs...)
+ pointer++
+ }
+ textInsert = textInsert[commonlength:]
+ textDelete = textDelete[commonlength:]
+ }
+ // Factor out any common suffixes.
+ commonlength = commonSuffixLength(textInsert, textDelete)
+ if commonlength != 0 {
+ insertIndex := len(textInsert) - commonlength
+ deleteIndex := len(textDelete) - commonlength
+ diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text
+ textInsert = textInsert[:insertIndex]
+ textDelete = textDelete[:deleteIndex]
+ }
+ }
+ // Delete the offending records and add the merged ones.
+ if countDelete == 0 {
+ diffs = splice(diffs, pointer-countInsert,
+ countDelete+countInsert,
+ Diff{DiffInsert, string(textInsert)})
+ } else if countInsert == 0 {
+ diffs = splice(diffs, pointer-countDelete,
+ countDelete+countInsert,
+ Diff{DiffDelete, string(textDelete)})
+ } else {
+ diffs = splice(diffs, pointer-countDelete-countInsert,
+ countDelete+countInsert,
+ Diff{DiffDelete, string(textDelete)},
+ Diff{DiffInsert, string(textInsert)})
+ }
+
+ pointer = pointer - countDelete - countInsert + 1
+ if countDelete != 0 {
+ pointer++
+ }
+ if countInsert != 0 {
+ pointer++
+ }
+ } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual {
+ // Merge this equality with the previous one.
+ diffs[pointer-1].Text += diffs[pointer].Text
+ diffs = append(diffs[:pointer], diffs[pointer+1:]...)
+ } else {
+ pointer++
+ }
+ countInsert = 0
+ countDelete = 0
+ textDelete = nil
+ textInsert = nil
+ break
+ }
+ }
+
+ if len(diffs[len(diffs)-1].Text) == 0 {
+ diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end.
+ }
+
+ // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
+ changes := false
+ pointer = 1
+ // Intentionally ignore the first and last element (don't need checking).
+ for pointer < (len(diffs) - 1) {
+ if diffs[pointer-1].Type == DiffEqual &&
+ diffs[pointer+1].Type == DiffEqual {
+ // This is a single edit surrounded by equalities.
+ if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) {
+ // Shift the edit over the previous equality.
+ diffs[pointer].Text = diffs[pointer-1].Text +
+ diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)]
+ diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text
+ diffs = splice(diffs, pointer-1, 1)
+ changes = true
+ } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) {
+ // Shift the edit over the next equality.
+ diffs[pointer-1].Text += diffs[pointer+1].Text
+ diffs[pointer].Text =
+ diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text
+ diffs = splice(diffs, pointer+1, 1)
+ changes = true
+ }
+ }
+ pointer++
+ }
+
+ // If shifts were made, the diff needs reordering and another shift sweep.
+ if changes {
+ diffs = dmp.DiffCleanupMerge(diffs)
+ }
+
+ return diffs
+}
+
+// DiffXIndex returns the equivalent location in s2.
+func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
+ chars1 := 0
+ chars2 := 0
+ lastChars1 := 0
+ lastChars2 := 0
+ lastDiff := Diff{}
+ for i := 0; i < len(diffs); i++ {
+ aDiff := diffs[i]
+ if aDiff.Type != DiffInsert {
+ // Equality or deletion.
+ chars1 += len(aDiff.Text)
+ }
+ if aDiff.Type != DiffDelete {
+ // Equality or insertion.
+ chars2 += len(aDiff.Text)
+ }
+ if chars1 > loc {
+ // Overshot the location.
+ lastDiff = aDiff
+ break
+ }
+ lastChars1 = chars1
+ lastChars2 = chars2
+ }
+ if lastDiff.Type == DiffDelete {
+ // The location was deleted.
+ return lastChars2
+ }
+ // Add the remaining character length.
+ return lastChars2 + (loc - lastChars1)
+}
+
+// DiffPrettyHtml converts a []Diff into a pretty HTML report.
+// It is intended as an example from which to write one's own display functions.
+func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
+ var buff bytes.Buffer
+ for _, diff := range diffs {
+ text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
    ", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += utf8.RuneCountInString(aDiff.Text) + case DiffDelete: + deletions += utf8.RuneCountInString(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. 
+
+// DiffFromDelta, given the original text1 and an encoded string which describes the operations required to transform text1 into text2, computes the full diff.
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) {
+ i := 0
+ runes := []rune(text1)
+
+ for _, token := range strings.Split(delta, "\t") {
+ if len(token) == 0 {
+ // Blank tokens are ok (from a trailing \t).
+ continue
+ }
+
+ // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality).
+ param := token[1:]
+
+ switch op := token[0]; op {
+ case '+':
+ // Decode would change all "+" to " "
+ param = strings.Replace(param, "+", "%2b", -1)
+ param, err = url.QueryUnescape(param)
+ if err != nil {
+ return nil, err
+ }
+ if !utf8.ValidString(param) {
+ return nil, fmt.Errorf("invalid UTF-8 token: %q", param)
+ }
+
+ diffs = append(diffs, Diff{DiffInsert, param})
+ case '=', '-':
+ n, err := strconv.ParseInt(param, 10, 0)
+ if err != nil {
+ return nil, err
+ } else if n < 0 {
+ return nil, errors.New("Negative number in DiffFromDelta: " + param)
+ }
+
+ i += int(n)
+ // Break out if we are out of bounds, go1.6 can't handle this very well
+ if i > len(runes) {
+ break
+ }
+ // Remember that string slicing is by byte - we want by rune here.
+ text := string(runes[i-int(n) : i])
+
+ if op == '=' {
+ diffs = append(diffs, Diff{DiffEqual, text})
+ } else {
+ diffs = append(diffs, Diff{DiffDelete, text})
+ }
+ default:
+ // Anything else is an error.
+ return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0]))
+ }
+ }
+
+ if i != len(runes) {
+ return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1))
+ }
+
+ return diffs, nil
+}
+
+// diffLinesToStrings splits two texts into a list of strings. Each string represents one line.
+func (dmp *DiffMatchPatch) diffLinesToStrings(text1, text2 string) (string, string, []string) {
+ // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character.
+ lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n'
+
+ //Each string has the index of lineArray which it points to
+ strIndexArray1 := dmp.diffLinesToStringsMunge(text1, &lineArray)
+ strIndexArray2 := dmp.diffLinesToStringsMunge(text2, &lineArray)
+
+ return intArrayToString(strIndexArray1), intArrayToString(strIndexArray2), lineArray
+}
+
+// diffLinesToStringsMunge splits a text into an array of strings, and reduces the texts to a []string.
+func (dmp *DiffMatchPatch) diffLinesToStringsMunge(text string, lineArray *[]string) []uint32 {
+ // Walk the text, pulling out a substring for each line. text.split('\n') would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect.
+ lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4
+ lineStart := 0
+ lineEnd := -1
+ strs := []uint32{}
+
+ for lineEnd < len(text)-1 {
+ lineEnd = indexOf(text, "\n", lineStart)
+
+ if lineEnd == -1 {
+ lineEnd = len(text) - 1
+ }
+
+ line := text[lineStart : lineEnd+1]
+ lineStart = lineEnd + 1
+ lineValue, ok := lineHash[line]
+
+ if ok {
+ strs = append(strs, uint32(lineValue))
+ } else {
+ *lineArray = append(*lineArray, line)
+ lineHash[line] = len(*lineArray) - 1
+ strs = append(strs, uint32(len(*lineArray)-1))
+ }
+ }
+
+ return strs
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
new file mode 100644
index 0000000000..d3acc32ce1
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
+package diffmatchpatch
+
+import (
+ "time"
+)
+
+// DiffMatchPatch holds the configuration for diff-match-patch operations.
+type DiffMatchPatch struct {
+ // Number of seconds to map a diff before giving up (0 for infinity).
+ DiffTimeout time.Duration
+ // Cost of an empty edit operation in terms of edit characters.
+ DiffEditCost int
+ // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
+ MatchDistance int
+ // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match.
+ PatchDeleteThreshold float64
+ // Chunk size for context length.
+ PatchMargin int
+ // The number of bits in an int.
+ MatchMaxBits int
+ // At what point is no match declared (0.0 = perfection, 1.0 = very loose).
+ MatchThreshold float64
+}
+
+// New creates a new DiffMatchPatch object with default parameters.
+func New() *DiffMatchPatch {
+ // Defaults.
+ return &DiffMatchPatch{
+ DiffTimeout: time.Second,
+ DiffEditCost: 4,
+ MatchThreshold: 0.5,
+ MatchDistance: 1000,
+ PatchDeleteThreshold: 0.5,
+ PatchMargin: 4,
+ MatchMaxBits: 32,
+ }
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
new file mode 100644
index 0000000000..17374e109f
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
@@ -0,0 +1,160 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+ "math"
+)
+
+// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'.
+// Returns -1 if no match found.
+func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. 
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
+ accuracy := float64(e) / float64(len(pattern))
+ proximity := math.Abs(float64(loc - x))
+ if dmp.MatchDistance == 0 {
+ // Dodge divide by zero error.
+ if proximity == 0 {
+ return accuracy
+ }
+
+ return 1.0
+ }
+ return accuracy + (proximity / float64(dmp.MatchDistance))
+}
+
+// MatchAlphabet initialises the alphabet for the Bitap algorithm.
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
+ s := map[byte]int{}
+ charPattern := []byte(pattern)
+ for _, c := range charPattern {
+ _, ok := s[c]
+ if !ok {
+ s[c] = 0
+ }
+ }
+ i := 0
+
+ for _, c := range charPattern {
+ value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
+ s[c] = value
+ i++
+ }
+
+ return s
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
new file mode 100644
index 0000000000..533ec0da7b
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
@@ -0,0 +1,17 @@
+// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT.
+
+package diffmatchpatch
+
+import "fmt"
+
+const _Operation_name = "DeleteEqualInsert"
+
+var _Operation_index = [...]uint8{0, 6, 11, 17}
+
+func (i Operation) String() string {
+ i -= -1
+ if i < 0 || i >= Operation(len(_Operation_index)-1) {
+ return fmt.Sprintf("Operation(%d)", i+-1)
+ }
+ return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
+}
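A short usage sketch of the fuzzy matcher defined in match.go above (the text, pattern, and location are illustrative; tolerance is governed by the MatchThreshold and MatchDistance fields shown earlier):

    package main

    import (
        "fmt"

        "github.com/sergi/go-diff/diffmatchpatch"
    )

    func main() {
        dmp := diffmatchpatch.New()
        // Fuzzy-locate "quik" near index 4; Bitap tolerates small errors.
        loc := dmp.MatchMain("The quick brown fox", "quik", 4)
        fmt.Println(loc) // best match index, or -1 if nothing scores well enough
    }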
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
new file mode 100644
index 0000000000..0dbe3bdd7d
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
@@ -0,0 +1,556 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+ "bytes"
+ "errors"
+ "math"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Patch represents one patch operation.
+type Patch struct {
+ diffs []Diff
+ Start1 int
+ Start2 int
+ Length1 int
+ Length2 int
+}
+
+// String emulates GNU diff's format.
+// Header: @@ -382,8 +481,9 @@
+// Indices are printed as 1-based, not 0-based.
+func (p *Patch) String() string {
+ var coords1, coords2 string
+
+ if p.Length1 == 0 {
+ coords1 = strconv.Itoa(p.Start1) + ",0"
+ } else if p.Length1 == 1 {
+ coords1 = strconv.Itoa(p.Start1 + 1)
+ } else {
+ coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
+ }
+
+ if p.Length2 == 0 {
+ coords2 = strconv.Itoa(p.Start2) + ",0"
+ } else if p.Length2 == 1 {
+ coords2 = strconv.Itoa(p.Start2 + 1)
+ } else {
+ coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
+ }
+
+ var text bytes.Buffer
+ _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
+
+ // Escape the body of the patch with %xx notation.
+ for _, aDiff := range p.diffs {
+ switch aDiff.Type {
+ case DiffInsert:
+ _, _ = text.WriteString("+")
+ case DiffDelete:
+ _, _ = text.WriteString("-")
+ case DiffEqual:
+ _, _ = text.WriteString(" ")
+ }
+
+ _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
+ _, _ = text.WriteString("\n")
+ }
+
+ return unescaper.Replace(text.String())
+}
+
+// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits.
+func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch {
+ if len(text) == 0 {
+ return patch
+ }
+
+ pattern := text[patch.Start2 : patch.Start2+patch.Length1]
+ padding := 0
+
+ // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length.
+ for strings.Index(text, pattern) != strings.LastIndex(text, pattern) &&
+ len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin {
+ padding += dmp.PatchMargin
+ maxStart := max(0, patch.Start2-padding)
+ minEnd := min(len(text), patch.Start2+patch.Length1+padding)
+ pattern = text[maxStart:minEnd]
+ }
+ // Add one chunk for good luck.
+ padding += dmp.PatchMargin
+
+ // Add the prefix.
+ prefix := text[max(0, patch.Start2-padding):patch.Start2]
+ if len(prefix) != 0 {
+ patch.diffs = append([]Diff{{DiffEqual, prefix}}, patch.diffs...)
+ }
+ // Add the suffix.
+ suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)]
+ if len(suffix) != 0 {
+ patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix})
+ }
+
+ // Roll back the start points.
+ patch.Start1 -= len(prefix)
+ patch.Start2 -= len(prefix)
+ // Extend the lengths.
+ patch.Length1 += len(prefix) + len(suffix)
+ patch.Length2 += len(prefix) + len(suffix)
+
+ return patch
+}
+
+// PatchMake computes a list of patches.
+func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch {
+ if len(opt) == 1 {
+ diffs, _ := opt[0].([]Diff)
+ text1 := dmp.DiffText1(diffs)
+ return dmp.PatchMake(text1, diffs)
+ } else if len(opt) == 2 {
+ text1 := opt[0].(string)
+ switch t := opt[1].(type) {
+ case string:
+ diffs := dmp.DiffMain(text1, t, true)
+ if len(diffs) > 2 {
+ diffs = dmp.DiffCleanupSemantic(diffs)
+ diffs = dmp.DiffCleanupEfficiency(diffs)
+ }
+ return dmp.PatchMake(text1, diffs)
+ case []Diff:
+ return dmp.patchMake2(text1, t)
+ }
+ } else if len(opt) == 3 {
+ return dmp.PatchMake(opt[0], opt[2])
+ }
+ return []Patch{}
+}
+
+// patchMake2 computes a list of patches to turn text1 into text2.
+// text2 is not provided, diffs are the delta between text1 and text2.
+func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch {
+ // Check for null inputs not needed since null can't be passed in C#.
+ patches := []Patch{}
+ if len(diffs) == 0 {
+ return patches // Get rid of the null case.
+ }
+
+ patch := Patch{}
+ charCount1 := 0 // Number of characters into the text1 string.
+ charCount2 := 0 // Number of characters into the text2 string.
+ // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info.
+ prepatchText := text1
+ postpatchText := text1
+
+ for i, aDiff := range diffs {
+ if len(patch.diffs) == 0 && aDiff.Type != DiffEqual {
+ // A new patch starts here.
+ patch.Start1 = charCount1
+ patch.Start2 = charCount2
+ }
+
+ switch aDiff.Type {
+ case DiffInsert:
+ patch.diffs = append(patch.diffs, aDiff)
+ patch.Length2 += len(aDiff.Text)
+ postpatchText = postpatchText[:charCount2] +
+ aDiff.Text + postpatchText[charCount2:]
+ case DiffDelete:
+ patch.Length1 += len(aDiff.Text)
+ patch.diffs = append(patch.diffs, aDiff)
+ postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):]
+ case DiffEqual:
+ if len(aDiff.Text) <= 2*dmp.PatchMargin &&
+ len(patch.diffs) != 0 && i != len(diffs)-1 {
+ // Small equality inside a patch.
+ patch.diffs = append(patch.diffs, aDiff)
+ patch.Length1 += len(aDiff.Text)
+ patch.Length2 += len(aDiff.Text)
+ }
+ if len(aDiff.Text) >= 2*dmp.PatchMargin {
+ // Time for a new patch.
+ if len(patch.diffs) != 0 {
+ patch = dmp.PatchAddContext(patch, prepatchText)
+ patches = append(patches, patch)
+ patch = Patch{}
+ // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch.
+ prepatchText = postpatchText
+ charCount1 = charCount2
+ }
+ }
+ }
+
+ // Update the current character count.
+ if aDiff.Type != DiffInsert {
+ charCount1 += len(aDiff.Text)
+ }
+ if aDiff.Type != DiffDelete {
+ charCount2 += len(aDiff.Text)
+ }
+ }
+
+ // Pick up the leftover patch if not empty.
+ if len(patch.diffs) != 0 {
+ patch = dmp.PatchAddContext(patch, prepatchText)
+ patches = append(patches, patch)
+ }
+
+ return patches
+}
+
+// PatchDeepCopy returns an array that is identical to a given array of patches.
+func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch {
+ patchesCopy := []Patch{}
+ for _, aPatch := range patches {
+ patchCopy := Patch{}
+ for _, aDiff := range aPatch.diffs {
+ patchCopy.diffs = append(patchCopy.diffs, Diff{
+ aDiff.Type,
+ aDiff.Text,
+ })
+ }
+ patchCopy.Start1 = aPatch.Start1
+ patchCopy.Start2 = aPatch.Start2
+ patchCopy.Length1 = aPatch.Length1
+ patchCopy.Length2 = aPatch.Length2
+ patchesCopy = append(patchesCopy, patchCopy)
+ }
+ return patchesCopy
+}
+
+// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied.
+func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) {
+ if len(patches) == 0 {
+ return text, []bool{}
+ }
+
+ // Deep copy the patches so that no changes are made to originals.
+ patches = dmp.PatchDeepCopy(patches)
+
+ nullPadding := dmp.PatchAddPadding(patches)
+ text = nullPadding + text + nullPadding
+ patches = dmp.PatchSplitMax(patches)
+
+ x := 0
+ // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22.
+ delta := 0
+ results := make([]bool, len(patches))
+ for _, aPatch := range patches {
+ expectedLoc := aPatch.Start2 + delta
+ text1 := dmp.DiffText1(aPatch.diffs)
+ var startLoc int
+ endLoc := -1
+ if len(text1) > dmp.MatchMaxBits {
+ // PatchSplitMax will only provide an oversized pattern in the case of a monster delete.
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc)
+ if startLoc != -1 {
+ endLoc = dmp.MatchMain(text,
+ text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits)
+ if endLoc == -1 || startLoc >= endLoc {
+ // Can't find valid trailing context. Drop this patch.
+ startLoc = -1
+ }
+ }
+ } else {
+ startLoc = dmp.MatchMain(text, text1, expectedLoc)
+ }
+ if startLoc == -1 {
+ // No match found. :(
+ results[x] = false
+ // Subtract the delta for this failed patch from subsequent patches.
+ delta -= aPatch.Length2 - aPatch.Length1
+ } else {
+ // Found a match. :)
+ results[x] = true
+ delta = startLoc - expectedLoc
+ var text2 string
+ if endLoc == -1 {
+ text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))]
+ } else {
+ text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))]
+ }
+ if text1 == text2 {
+ // Perfect match, just shove the replacement text in.
+ text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):]
+ } else {
+ // Imperfect match. Run a diff to get a framework of equivalent indices.
+ diffs := dmp.DiffMain(text1, text2, false)
+ if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold {
+ // The end points match, but the content is unacceptably bad.
+ results[x] = false
+ } else {
+ diffs = dmp.DiffCleanupSemanticLossless(diffs)
+ index1 := 0
+ for _, aDiff := range aPatch.diffs {
+ if aDiff.Type != DiffEqual {
+ index2 := dmp.DiffXIndex(diffs, index1)
+ if aDiff.Type == DiffInsert {
+ // Insertion
+ text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:]
+ } else if aDiff.Type == DiffDelete {
+ // Deletion
+ startIndex := startLoc + index2
+ text = text[:startIndex] +
+ text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:]
+ }
+ }
+ if aDiff.Type != DiffDelete {
+ index1 += len(aDiff.Text)
+ }
+ }
+ }
+ }
+ }
+ x++
+ }
+ // Strip the padding off.
+ text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))]
+ return text, results
+}
+
+// PatchAddPadding adds some padding on text start and end so that edges can match something.
+// Intended to be called only from within patchApply.
+func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string {
+ paddingLength := dmp.PatchMargin
+ nullPadding := ""
+ for x := 1; x <= paddingLength; x++ {
+ nullPadding += string(rune(x))
+ }
+
+ // Bump all the patches forward.
+ for i := range patches {
+ patches[i].Start1 += paddingLength
+ patches[i].Start2 += paddingLength
+ }
+
+ // Add some padding on start of first diff.
+ if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual {
+ // Add nullPadding equality.
+ patches[0].diffs = append([]Diff{{DiffEqual, nullPadding}}, patches[0].diffs...)
+ patches[0].Start1 -= paddingLength // Should be 0.
+ patches[0].Start2 -= paddingLength // Should be 0.
+ patches[0].Length1 += paddingLength
+ patches[0].Length2 += paddingLength
+ } else if paddingLength > len(patches[0].diffs[0].Text) {
+ // Grow first equality.
+ extraLength := paddingLength - len(patches[0].diffs[0].Text)
+ patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text
+ patches[0].Start1 -= extraLength
+ patches[0].Start2 -= extraLength
+ patches[0].Length1 += extraLength
+ patches[0].Length2 += extraLength
+ }
+
+ // Add some padding on end of last diff.
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin {
+ postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin]
+ } else {
+ postcontext = dmp.DiffText1(bigpatch.diffs)
+ }
+
+ if len(postcontext) != 0 {
+ patch.Length1 += len(postcontext)
+ patch.Length2 += len(postcontext)
+ if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual {
+ patch.diffs[len(patch.diffs)-1].Text += postcontext
+ } else {
+ patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext})
+ }
+ }
+ if !empty {
+ x++
+ patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...)
+ }
+ }
+ }
+ return patches
+}
+
+// PatchToText takes a list of patches and returns a textual representation.
+func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string {
+ var text bytes.Buffer
+ for _, aPatch := range patches {
+ _, _ = text.WriteString(aPatch.String())
+ }
+ return text.String()
+}
+
+// PatchFromText parses a textual representation of patches and returns a list of Patch objects.
+func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) {
+ patches := []Patch{}
+ if len(textline) == 0 {
+ return patches, nil
+ }
+ text := strings.Split(textline, "\n")
+ textPointer := 0
+ patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$")
+
+ var patch Patch
+ var sign uint8
+ var line string
+ for textPointer < len(text) {
+
+ if !patchHeader.MatchString(text[textPointer]) {
+ return patches, errors.New("Invalid patch string: " + text[textPointer])
+ }
+
+ patch = Patch{}
+ m := patchHeader.FindStringSubmatch(text[textPointer])
+
+ patch.Start1, _ = strconv.Atoi(m[1])
+ if len(m[2]) == 0 {
+ patch.Start1--
+ patch.Length1 = 1
+ } else if m[2] == "0" {
+ patch.Length1 = 0
+ } else {
+ patch.Start1--
+ patch.Length1, _ = strconv.Atoi(m[2])
+ }
+
+ patch.Start2, _ = strconv.Atoi(m[3])
+
+ if len(m[4]) == 0 {
+ patch.Start2--
+ patch.Length2 = 1
+ } else if m[4] == "0" {
+ patch.Length2 = 0
+ } else {
+ patch.Start2--
+ patch.Length2, _ = strconv.Atoi(m[4])
+ }
+ textPointer++
+
+ for textPointer < len(text) {
+ if len(text[textPointer]) > 0 {
+ sign = text[textPointer][0]
+ } else {
+ textPointer++
+ continue
+ }
+
+ line = text[textPointer][1:]
+ line = strings.Replace(line, "+", "%2b", -1)
+ line, _ = url.QueryUnescape(line)
+ if sign == '-' {
+ // Deletion.
+ patch.diffs = append(patch.diffs, Diff{DiffDelete, line})
+ } else if sign == '+' {
+ // Insertion.
+ patch.diffs = append(patch.diffs, Diff{DiffInsert, line})
+ } else if sign == ' ' {
+ // Minor equality.
+ patch.diffs = append(patch.diffs, Diff{DiffEqual, line})
+ } else if sign == '@' {
+ // Start of next patch.
+ break
+ } else {
+ // WTF?
+ return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line))
+ }
+ textPointer++
+ }
+
+ patches = append(patches, patch)
+ }
+ return patches, nil
+}
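The patch.go file above covers the full make/serialize/apply cycle; a minimal end-to-end sketch under the same assumptions as the earlier examples (illustrative texts, upstream import path):

    package main

    import (
        "fmt"

        "github.com/sergi/go-diff/diffmatchpatch"
    )

    func main() {
        dmp := diffmatchpatch.New()
        patches := dmp.PatchMake("The quick brown fox", "The swift brown fox")
        fmt.Println(dmp.PatchToText(patches)) // GNU-style @@ hunks
        // Apply against a drifted copy of the source; the bool slice
        // reports which patches matched closely enough to be applied.
        result, applied := dmp.PatchApply(patches, "Once upon a time, the quick brown fox.")
        fmt.Println(result, applied)
    }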
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
new file mode 100644
index 0000000000..44c4359547
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI.
+// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc.
+var unescaper = strings.NewReplacer(
+ "%21", "!", "%7E", "~", "%27", "'",
+ "%28", "(", "%29", ")", "%3B", ";",
+ "%2F", "/", "%3F", "?", "%3A", ":",
+ "%40", "@", "%26", "&", "%3D", "=",
+ "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*")
+
+// indexOf returns the first index of pattern in str, starting at str[i].
+func indexOf(str string, pattern string, i int) int {
+ if i > len(str)-1 {
+ return -1
+ }
+ if i <= 0 {
+ return strings.Index(str, pattern)
+ }
+ ind := strings.Index(str[i:], pattern)
+ if ind == -1 {
+ return -1
+ }
+ return ind + i
+}
+
+// lastIndexOf returns the last index of pattern in str, starting at str[i].
+func lastIndexOf(str string, pattern string, i int) int {
+ if i < 0 {
+ return -1
+ }
+ if i >= len(str) {
+ return strings.LastIndex(str, pattern)
+ }
+ _, size := utf8.DecodeRuneInString(str[i:])
+ return strings.LastIndex(str[:i+size], pattern)
+}
+
+// runesIndexOf returns the index of pattern in target, starting at target[i].
+func runesIndexOf(target, pattern []rune, i int) int {
+ if i > len(target)-1 {
+ return -1
+ }
+ if i <= 0 {
+ return runesIndex(target, pattern)
+ }
+ ind := runesIndex(target[i:], pattern)
+ if ind == -1 {
+ return -1
+ }
+ return ind + i
+}
+
+func runesEqual(r1, r2 []rune) bool {
+ if len(r1) != len(r2) {
+ return false
+ }
+ for i, c := range r1 {
+ if c != r2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// runesIndex is the equivalent of strings.Index for rune slices.
+func runesIndex(r1, r2 []rune) int {
+ last := len(r1) - len(r2)
+ for i := 0; i <= last; i++ {
+ if runesEqual(r1[i:i+len(r2)], r2) {
+ return i
+ }
+ }
+ return -1
+}
+
+func intArrayToString(ns []uint32) string {
+ if len(ns) == 0 {
+ return ""
+ }
+
+ indexSeparator := IndexSeparator[0]
+
+ // Appr. 3 chars per num plus the comma.
+ b := []byte{}
+ for _, n := range ns {
+ b = strconv.AppendInt(b, int64(n), 10)
+ b = append(b, indexSeparator)
+ }
+ b = b[:len(b)-1]
+ return string(b)
+}
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index ea0798d870..d06975e712 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -142,6 +142,11 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
 }
 m.mu.Lock()
+ // Double check that it doesn't exist.
+ if _, ok := m.getData()[name]; ok { + m.mu.Unlock() + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } item := mem.CreateDir(name) mem.SetMode(item, os.ModeDir|perm) m.getData()[name] = item diff --git a/vendor/github.com/spf13/viper/Makefile b/vendor/github.com/spf13/viper/Makefile index 130c427e8b..3f4234d334 100644 --- a/vendor/github.com/spf13/viper/Makefile +++ b/vendor/github.com/spf13/viper/Makefile @@ -16,7 +16,7 @@ endif # Dependency versions GOTESTSUM_VERSION = 1.8.0 -GOLANGCI_VERSION = 1.49.0 +GOLANGCI_VERSION = 1.50.1 # Add the ability to override some variables # Use with care diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 63413a7dc0..cd39290521 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -8,7 +8,7 @@ [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge-flat.svg)](https://github.com/avelino/awesome-go#configuration) [![run on repl.it](https://repl.it/badge/github/sagikazarmark/Viper-example)](https://repl.it/@sagikazarmark/Viper-example#main.go) -[![GitHub Workflow Status](https://img.shields.io/github/workflow/status/spf13/viper/CI?style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) +[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/spf13/viper/ci.yaml?branch=master&style=flat-square)](https://github.com/spf13/viper/actions?query=workflow%3ACI) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Go Report Card](https://goreportcard.com/badge/github.com/spf13/viper?style=flat-square)](https://goreportcard.com/report/github.com/spf13/viper) ![Go Version](https://img.shields.io/badge/go%20version-%3E=1.16-61CFDD.svg?style=flat-square) @@ -40,8 +40,8 @@ go get github.com/spf13/viper ## What is Viper? -Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed -to work within an application, and can handle all types of configuration needs +Viper is a complete configuration solution for Go applications including [12-Factor apps](https://12factor.net/#the_twelve_factors). +It is designed to work within an application, and can handle all types of configuration needs and formats. It supports: * setting defaults diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go index 45fddc8b59..a993c5994b 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/toml/codec.go @@ -1,39 +1,16 @@ -//go:build viper_toml1 -// +build viper_toml1 - package toml import ( - "github.com/pelletier/go-toml" + "github.com/pelletier/go-toml/v2" ) // Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. 
type Codec struct{} func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - t, err := toml.TreeFromMap(v) - if err != nil { - return nil, err - } - - s, err := t.ToTomlString() - if err != nil { - return nil, err - } - - return []byte(s), nil + return toml.Marshal(v) } func (Codec) Decode(b []byte, v map[string]interface{}) error { - tree, err := toml.LoadBytes(b) - if err != nil { - return err - } - - tmap := tree.ToMap() - for key, value := range tmap { - v[key] = value - } - - return nil + return toml.Unmarshal(b, &v) } diff --git a/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go b/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go deleted file mode 100644 index 112c6d3725..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/toml/codec2.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build !viper_toml1 -// +build !viper_toml1 - -package toml - -import ( - "github.com/pelletier/go-toml/v2" -) - -// Codec implements the encoding.Encoder and encoding.Decoder interfaces for TOML encoding. -type Codec struct{} - -func (Codec) Encode(v map[string]interface{}) ([]byte, error) { - return toml.Marshal(v) -} - -func (Codec) Decode(b []byte, v map[string]interface{}) error { - return toml.Unmarshal(b, &v) -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go index 24cc19dfca..82dc136a3a 100644 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go +++ b/vendor/github.com/spf13/viper/internal/encoding/yaml/codec.go @@ -1,6 +1,6 @@ package yaml -// import "gopkg.in/yaml.v2" +import "gopkg.in/yaml.v3" // Codec implements the encoding.Encoder and encoding.Decoder interfaces for YAML encoding. type Codec struct{} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go deleted file mode 100644 index 4c398c2f42..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml2.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build viper_yaml2 -// +build viper_yaml2 - -package yaml - -import yamlv2 "gopkg.in/yaml.v2" - -var yaml = struct { - Marshal func(in interface{}) (out []byte, err error) - Unmarshal func(in []byte, out interface{}) (err error) -}{ - Marshal: yamlv2.Marshal, - Unmarshal: yamlv2.Unmarshal, -} diff --git a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go b/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go deleted file mode 100644 index 3a4775ced9..0000000000 --- a/vendor/github.com/spf13/viper/internal/encoding/yaml/yaml3.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !viper_yaml2 -// +build !viper_yaml2 - -package yaml - -import yamlv3 "gopkg.in/yaml.v3" - -var yaml = struct { - Marshal func(in interface{}) (out []byte, err error) - Unmarshal func(in []byte, out interface{}) (err error) -}{ - Marshal: yamlv3.Marshal, - Unmarshal: yamlv3.Unmarshal, -} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index 5c12529b4c..06610fc5a7 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -421,13 +421,18 @@ var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props // SupportedRemoteProviders are universally supported remote providers. var SupportedRemoteProviders = []string{"etcd", "etcd3", "consul", "firestore"} +// OnConfigChange sets the event handler that is called when a config file changes. 
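+// The handler runs on the goroutine started by WatchConfig.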
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } + +// OnConfigChange sets the event handler that is called when a config file changes. func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { v.onConfigChange = run } +// WatchConfig starts watching a config file for changes. func WatchConfig() { v.WatchConfig() } +// WatchConfig starts watching a config file for changes. func (v *Viper) WatchConfig() { initWG := sync.WaitGroup{} initWG.Add(1) diff --git a/vendor/github.com/subosito/gotenv/gotenv.go b/vendor/github.com/subosito/gotenv/gotenv.go index 7b1186e1fd..dc013e1e07 100644 --- a/vendor/github.com/subosito/gotenv/gotenv.go +++ b/vendor/github.com/subosito/gotenv/gotenv.go @@ -3,6 +3,7 @@ package gotenv import ( "bufio" + "bytes" "fmt" "io" "os" @@ -174,9 +175,36 @@ func Write(env Env, filename string) error { return file.Sync() } +// splitLines is a valid SplitFunc for a bufio.Scanner. It will split lines on CR ('\r'), LF ('\n') or CRLF (any of the three sequences). +// If a CR is immediately followed by a LF, it is treated as a CRLF (one single line break). +func splitLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, bufio.ErrFinalToken + } + + idx := bytes.IndexAny(data, "\r\n") + switch { + case atEOF && idx < 0: + return len(data), data, bufio.ErrFinalToken + + case idx < 0: + return 0, nil, nil + } + + // consume CR or LF + eol := idx + 1 + // detect CRLF + if len(data) > eol && data[eol-1] == '\r' && data[eol] == '\n' { + eol++ + } + + return eol, data[:idx], nil +} + func strictParse(r io.Reader, override bool) (Env, error) { env := make(Env) scanner := bufio.NewScanner(r) + scanner.Split(splitLines) firstLine := true @@ -283,7 +311,6 @@ func parseLine(s string, env Env, override bool) error { return varReplacement(s, hsq, env, override) } val = varRgx.ReplaceAllStringFunc(val, fv) - val = parseVal(val, env, hdq, override) } env[key] = val @@ -352,18 +379,3 @@ func checkFormat(s string, env Env) error { return fmt.Errorf("line `%s` doesn't match format", s) } - -func parseVal(val string, env Env, ignoreNewlines bool, override bool) string { - if strings.Contains(val, "=") && !ignoreNewlines { - kv := strings.Split(val, "\r") - - if len(kv) > 1 { - val = kv[0] - for _, l := range kv[1:] { - _ = parseLine(l, env, override) - } - } - } - - return val -} diff --git a/vendor/github.com/ulikunitz/xz/.gitignore b/vendor/github.com/ulikunitz/xz/.gitignore new file mode 100644 index 0000000000..eb3d5f5173 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/.gitignore @@ -0,0 +1,28 @@ +# .gitignore + +TODO.html +README.html + +lzma/writer.txt +lzma/reader.txt + +cmd/gxz/gxz +cmd/xb/xb + +# test executables +*.test + +# profile files +*.out + +# vim swap file +.*.swp + +# executables on windows +*.exe + +# default compression test file +enwik8* + +# file generated by example +example.xz \ No newline at end of file diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE new file mode 100644 index 0000000000..8a7f0877d3 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2014-2022 Ulrich Kunitz +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* My name, Ulrich Kunitz, may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ulikunitz/xz/README.md b/vendor/github.com/ulikunitz/xz/README.md new file mode 100644 index 0000000000..5547185213 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/README.md @@ -0,0 +1,77 @@ +# Package xz + +This Go language package supports the reading and writing of xz +compressed streams. It includes also a gxz command for compressing and +decompressing data. The package is completely written in Go and doesn't +have any dependency on any C code. + +The package is currently under development. There might be bugs and APIs +are not considered stable. At this time the package cannot compete with +the xz tool regarding compression speed and size. The algorithms there +have been developed over a long time and are highly optimized. However +there are a number of improvements planned and I'm very optimistic about +parallel compression and decompression. Stay tuned! + +## Using the API + +The following example program shows how to use the API. + +```go +package main + +import ( + "bytes" + "io" + "log" + "os" + + "github.com/ulikunitz/xz" +) + +func main() { + const text = "The quick brown fox jumps over the lazy dog.\n" + var buf bytes.Buffer + // compress text + w, err := xz.NewWriter(&buf) + if err != nil { + log.Fatalf("xz.NewWriter error %s", err) + } + if _, err := io.WriteString(w, text); err != nil { + log.Fatalf("WriteString error %s", err) + } + if err := w.Close(); err != nil { + log.Fatalf("w.Close error %s", err) + } + // decompress buffer and write output to stdout + r, err := xz.NewReader(&buf) + if err != nil { + log.Fatalf("NewReader error %s", err) + } + if _, err = io.Copy(os.Stdout, r); err != nil { + log.Fatalf("io.Copy error %s", err) + } +} +``` + +## Documentation + +You can find the full documentation at [pkg.go.dev](https://pkg.go.dev/github.com/ulikunitz/xz). + +## Using the gxz compression tool + +The package includes a gxz command line utility for compression and +decompression. + +Use following command for installation: + + $ go get github.com/ulikunitz/xz/cmd/gxz + +To test it call the following command. + + $ gxz bigfile + +After some time a much smaller file bigfile.xz will replace bigfile. +To decompress it use the following command. 
+
+    $ gxz -d bigfile.xz
+
diff --git a/vendor/github.com/ulikunitz/xz/SECURITY.md b/vendor/github.com/ulikunitz/xz/SECURITY.md
new file mode 100644
index 0000000000..5f7ec01b3b
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/SECURITY.md
@@ -0,0 +1,10 @@
+# Security Policy
+
+## Supported Versions
+
+Currently the last minor version v0.5.x is supported.
+
+## Reporting a Vulnerability
+
+Report a vulnerability by creating a Github issue at
+<https://github.com/ulikunitz/xz/issues>. Expect a response in a week.
diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md
new file mode 100644
index 0000000000..a3d6f19250
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/TODO.md
@@ -0,0 +1,372 @@
+# TODO list
+
+## Release v0.5.x
+
+1. Support check flag in gxz command.
+
+## Release v0.6
+
+1. Review encoder and check for lzma improvements under xz.
+2. Fix binary tree matcher.
+3. Compare compression ratio with xz tool using comparable parameters and
+   optimize parameters.
+4. Rename operation action and make it a simple type of size 8.
+5. Make maxMatches, wordSize parameters.
+6. Stop searching after a certain length is found (parameter sweetLen).
+
+## Release v0.7
+
+1. Optimize code.
+2. Do statistical analysis to get linear presets.
+3. Test sync.Pool compatibility for xz and lzma Writer and Reader.
+4. Fuzz optimized code.
+
+## Release v0.8
+
+1. Support parallel goroutines for writing and reading xz files.
+2. Support a ReaderAt interface for xz files with small block sizes.
+3. Improve compatibility between gxz and xz.
+4. Provide manual page for gxz.
+
+## Release v0.9
+
+1. Improve documentation.
+2. Fuzz again.
+
+## Release v1.0
+
+1. Fully functioning gxz.
+2. Add godoc URL to README.md (godoc.org).
+3. Resolve all issues.
+4. Define release candidates.
+5. Public announcement.
+
+## Package lzma
+
+### v0.6
+
+* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including
+  * simple scan at the dictionary head for the same byte
+  * use the killer byte (requiring matches to get longer, the first test
+    should be the byte that would make the match longer)
+
+## Optimizations
+
+* There may be a lot of false sharing in lzma.State; check whether this
+  can be improved by reorganizing its internal structure.
+
+* Check whether batching encoding and decoding improves speed.
+
+### DAG optimizations
+
+* Use full buffer to create minimal bit-length above range encoder.
+* Might be too slow (see v0.4)
+
+### Different match finders
+
+* hashes with 2, 3 characters additional to 4 characters
+* binary trees with 2-7 characters (uint64 as key, use uint32 as
+  pointers into an array)
+* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
+  into an array with bit-stealing for the colors)
+
+## Release Procedure
+
+* execute goch -l for all packages; probably with lower param like 0.5.
+* check orthography with gospell
+* Write release notes in doc/relnotes.
+* Update README.md
+* xb copyright . in xz directory to ensure all new files have Copyright header
+* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files
+* Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
+* Update TODO.md - write short log entry
+* `git checkout master && git merge dev`
+* `git tag -a <version>`
+* `git push`
+
+## Log
+
+### 2022-12-12
+
+Matt Dantay (@bodgit) reported an issue with the LZMA reader.
+The implementation returned an error if the dictionary size was less than
+4096 bytes, but the recommendation stated that the actually used window size
+should be set to 4096 bytes in that case. The issue was actually raised in
+pull request [#52](https://github.com/ulikunitz/xz/pull/52). The new patch
+v0.5.11 will fix it.
+
+### 2021-02-02
+
+Mituo Heijo fuzzed xz and found a bug in the function readIndexBody. The
+function allocated a slice of records immediately after reading the value,
+without further checks. Since the number could be too large, the make
+function panicked. The fix is to check the number against the expected
+number of records before allocating them.
+
+### 2020-12-17
+
+Release v0.5.9 fixes warnings and a typo, and adds SECURITY.md.
+
+One fix is interesting.
+
+```go
+const (
+	a byte = 0x1
+	b      = 0x2
+)
+```
+
+The constants a and b don't have the same type. Correct is:
+
+```go
+const (
+	a byte = 0x1
+	b byte = 0x2
+)
+```
+
+### 2020-08-19
+
+Release v0.5.8 fixes
+[issue #35](https://github.com/ulikunitz/xz/issues/35).
+
+### 2020-02-24
+
+Release v0.5.7 supports the check-ID None and fixes
+[issue #27](https://github.com/ulikunitz/xz/issues/27).
+
+### 2019-02-20
+
+Release v0.5.6 supports the go.mod file.
+
+### 2018-10-28
+
+Release v0.5.5 fixes issue #19 regarding observed ErrLimit outputs.
+
+### 2017-06-05
+
+Release v0.5.4 fixes issue #15, another problem with the padding size
+check for the xz block header. I removed the check completely.
+
+### 2017-02-15
+
+Release v0.5.3 fixes issue #12 regarding the decompression of an empty
+XZ stream. Many thanks to Tomasz Kłak, who reported the issue.
+
+### 2016-12-02
+
+Release v0.5.2 became necessary to allow the decoding of xz files with
+4-byte padding in the block header. Many thanks to Greg, who reported
+the issue.
+
+### 2016-07-23
+
+Release v0.5.1 became necessary to fix problems with 32-bit platforms.
+Many thanks to Bruno Brigas, who reported the issue.
+
+### 2016-07-04
+
+Release v0.5 provides improvements to the compressor and adds support for
+the decompression of xz files with multiple xz streams.
+
+### 2016-01-31
+
+Another compression rate increase by checking the byte at the length of the
+best match first, before checking the whole prefix. This makes the
+compressor even faster. We now have a large time budget to beat the
+compression ratio of the xz tool. For enwik8 we now have over 40 seconds
+to reduce the compressed file size by another 7 MiB.
+
+### 2016-01-30
+
+I simplified the encoder. Speed and compression rate increased
+dramatically. A high compression rate also affects the decompression
+speed. The approach with the buffer and optimizing for operation
+compression rate has not been successful. Going for the maximum length
+appears to be the best approach.
+
+### 2016-01-28
+
+The release v0.4 is ready. It provides a working xz implementation,
+which is rather slow, but works and is interoperable with the xz tool.
+It is an important milestone.
+
+### 2016-01-10
+
+I have the first working implementation of an xz reader and writer. I'm
+happy about reaching this milestone.
+
+### 2015-12-02
+
+I'm now ready to implement xz because I have a working LZMA2
+implementation. I decided today that v0.4 will use the slow encoder
+using the operations buffer to be able to go back, if I intend to do so.
+
+### 2015-10-21
+
+I have restarted the work on the library. While trying to implement
+LZMA2, I discovered that I need to resimplify the encoder and decoder
+functions.
+The option approach is too complicated. Using a limited byte writer, not
+caring about written bytes at all, and not trying to handle uncompressed
+data simplifies the LZMA encoder and decoder considerably. Processing
+uncompressed data and handling limits is a feature of the LZMA2 format,
+not of LZMA.
+
+I learned an interesting method from the LZO format. If the last copy is
+too far away, it moves the head by 2 bytes and not 1 byte to reduce
+processing times.
+
+### 2015-08-26
+
+I have now reimplemented the lzma package. The code is reasonably fast,
+but can still be optimized. The next step is to implement LZMA2 and then
+xz.
+
+### 2015-07-05
+
+Created release v0.3. The version is the foundation for a full xz
+implementation that is the target of v0.4.
+
+### 2015-06-11
+
+The gflag package has been developed because I couldn't use flag and
+pflag for fully compatible support of gzip's and lzma's options. It
+now seems to work quite nicely.
+
+### 2015-06-05
+
+The overflow issue was interesting to research; however, Henry S. Warren
+Jr.'s Hacker's Delight book was very helpful as usual and explained the
+issue perfectly. Fefe's information on his website was based on the
+C FAQ and quite bad, because it didn't address the issue of -MININT ==
+MININT.
+
+### 2015-06-04
+
+It has been a productive day. I improved the interface of lzma.Reader
+and lzma.Writer and fixed the error handling.
+
+### 2015-06-01
+
+By computing the bit length of the LZMA operations I was able to
+improve the greedy algorithm implementation. By using an 8 MByte buffer
+the compression rate was not as good as for xz but already better than
+the gzip default.
+
+Compression is currently slow, but this is something we will be able to
+improve over time.
+
+### 2015-05-26
+
+Checked the license of ogier/pflag. The lzmago binary should include
+the license terms for the pflag library.
+
+I added the endorsement clause as used by Google for the Go sources to
+the LICENSE file.
+
+### 2015-05-22
+
+The package lzb now contains the basic implementation for creating or
+reading LZMA byte streams. It allows for the implementation of the
+DAG-shortest-path algorithm for the compression function.
+
+### 2015-04-23
+
+Yesterday I completed the lzbase classes. I'm a little bit concerned that
+using the components may require too much code, but on the other hand
+there is a lot of flexibility.
+
+### 2015-04-22
+
+Implemented Reader and Writer during the Bayern game against Porto. The
+second half gave me enough time.
+
+### 2015-04-21
+
+While showering this morning I discovered that the design for OpEncoder
+and OpDecoder doesn't work, because encoding/decoding might depend on
+the current status of the dictionary. This is not exactly the right way
+to start the day.
+
+Therefore we need to keep the Reader and Writer design. This time around
+we simplify it by ignoring size limits. These can be added by wrappers
+around the Reader and Writer interfaces. The Parameters type isn't
+needed anymore.
+
+However I will implement a ReaderState and WriterState type to use
+static typing to ensure the right State object is combined with the
+right lzbase.Reader and lzbase.Writer.
+
+As a start I have implemented ReaderState and WriterState to ensure
+that the state for reading is only used by readers and the state for
+writing only by writers.
+
+### 2015-04-20
+
+Today I implemented the OpDecoder and tested OpEncoder and OpDecoder.
+
+### 2015-04-08
+
+Came up with a new simplified design for lzbase.
+I have already implemented the type State, which replaces OpCodec.
+
+### 2015-04-06
+
+The new lzma package is now fully usable and lzmago now uses it. The
+old lzma package has been completely removed.
+
+### 2015-04-05
+
+Implemented lzma.Reader and tested it.
+
+### 2015-04-04
+
+Implemented baseReader by adapting code from lzma.Reader.
+
+### 2015-04-03
+
+The opCodec was copied yesterday to lzma2. opCodec has a high number of
+dependencies on other files in lzma2. Therefore I had to copy almost all
+files from lzma.
+
+### 2015-03-31
+
+Removed only a TODO item.
+
+However, in Francesco Campoy's presentation "Go for Javaneros
+(Javaïstes?)" there is the idea that, using an embedded field E, all the
+methods of E will be defined on T. If E is an interface, T satisfies E.
+
+I have never used this, but it seems to be a cool idea.
+
+### 2015-03-30
+
+Finished the type writerDict and wrote a simple test.
+
+### 2015-03-25
+
+I started to implement the writerDict.
+
+### 2015-03-24
+
+After thinking long about the LZMA2 code and several false starts, I
+now have a plan to create a self-sufficient lzma2 package that supports
+the classic LZMA format as well as LZMA2. The core idea is to support a
+baseReader and baseWriter type that support the basic LZMA stream
+without any headers. Both types must support the reuse of dictionaries
+and the opCodec.
+
+### 2015-01-10
+
+1. Implemented simple lzmago tool
+2. Tested tool against large 4.4 GB file
+   * compression worked correctly; tested decompression with lzma
+   * decompression hits a full buffer condition
+3. Fixed a bug in the compressor and wrote a test for it
+4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
+
+### 2015-01-11
+
+* Release v0.2 because of the working LZMA encoder and decoder
diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go
new file mode 100644
index 0000000000..b30f1ec97d
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/bits.go
@@ -0,0 +1,79 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"io"
+)
+
+// putUint32LE puts the little-endian representation of x into the first
+// four bytes of p.
+func putUint32LE(p []byte, x uint32) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the little-endian representation of x into the first
+// eight bytes of p.
+func putUint64LE(p []byte, x uint64) {
+	p[0] = byte(x)
+	p[1] = byte(x >> 8)
+	p[2] = byte(x >> 16)
+	p[3] = byte(x >> 24)
+	p[4] = byte(x >> 32)
+	p[5] = byte(x >> 40)
+	p[6] = byte(x >> 48)
+	p[7] = byte(x >> 56)
+}
+
+// uint32LE converts a little endian representation to an uint32 value.
+func uint32LE(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
+		uint32(p[3])<<24
+}
+
+// putUvarint puts a uvarint representation of x into the byte slice.
+func putUvarint(p []byte, x uint64) int {
+	i := 0
+	for x >= 0x80 {
+		p[i] = byte(x) | 0x80
+		x >>= 7
+		i++
+	}
+	p[i] = byte(x)
+	return i + 1
+}
+
+// errOverflowU64 indicates an overflow of the 64-bit unsigned integer.
+var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")
+
+// readUvarint reads a uvarint from the given byte reader.
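+// Uvarints store the value in little-endian base-128 groups: the low seven
+// bits of each byte carry data and a set high bit marks a continuation byte,
+// the same scheme used by encoding/binary.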
+func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
+	const maxUvarintLen = 10
+
+	var s uint
+	i := 0
+	for {
+		b, err := r.ReadByte()
+		if err != nil {
+			return x, i, err
+		}
+		i++
+		if i > maxUvarintLen {
+			return x, i, errOverflowU64
+		}
+		if b < 0x80 {
+			if i == maxUvarintLen && b > 1 {
+				return x, i, errOverflowU64
+			}
+			return x | uint64(b)<<s, i, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+}
+
+// allZeros checks whether data is all zeros.
+func allZeros(data []byte) bool {
+	for _, b := range data {
+		if b != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// padLen returns the length of the padding required for the given
+// argument.
+func padLen(n int64) int {
+	k := int(n % 4)
+	if k > 0 {
+		k = 4 - k
+	}
+	return k
+}
+
+/*** Header ***/
+
+// headerMagic stores the magic bytes for the header
+var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}
+
+// HeaderLen provides the length of the xz file header.
+const HeaderLen = 12
+
+// Constants for the checksum methods supported by xz.
+const (
+	None   byte = 0x0
+	CRC32  byte = 0x1
+	CRC64  byte = 0x4
+	SHA256 byte = 0xa
+)
+
+// errInvalidFlags indicates that flags are invalid.
+var errInvalidFlags = errors.New("xz: invalid flags")
+
+// verifyFlags returns the error errInvalidFlags if the value is
+// invalid.
+func verifyFlags(flags byte) error {
+	switch flags {
+	case None, CRC32, CRC64, SHA256:
+		return nil
+	default:
+		return errInvalidFlags
+	}
+}
+
+// flagstrings maps flag values to strings.
+var flagstrings = map[byte]string{
+	None:   "None",
+	CRC32:  "CRC-32",
+	CRC64:  "CRC-64",
+	SHA256: "SHA-256",
+}
+
+// flagString returns the string representation for the given flags.
+func flagString(flags byte) string {
+	s, ok := flagstrings[flags]
+	if !ok {
+		return "invalid"
+	}
+	return s
+}
+
+// newHashFunc returns a function that creates hash instances for the
+// hash method encoded in flags.
+func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
+	switch flags {
+	case None:
+		newHash = newNoneHash
+	case CRC32:
+		newHash = newCRC32
+	case CRC64:
+		newHash = newCRC64
+	case SHA256:
+		newHash = sha256.New
+	default:
+		err = errInvalidFlags
+	}
+	return
+}
+
+// header provides the actual content of the xz file header: the flags.
+type header struct {
+	flags byte
+}
+
+// Errors returned by readHeader.
+var errHeaderMagic = errors.New("xz: invalid header magic bytes")
+
+// ValidHeader checks whether data is a correct xz file header. The
+// length of data must be HeaderLen.
+func ValidHeader(data []byte) bool {
+	var h header
+	err := h.UnmarshalBinary(data)
+	return err == nil
+}
+
+// String returns a string representation of the flags.
+func (h header) String() string {
+	return flagString(h.flags)
+}
+
+// UnmarshalBinary reads header from the provided data slice.
+func (h *header) UnmarshalBinary(data []byte) error {
+	// header length
+	if len(data) != HeaderLen {
+		return errors.New("xz: wrong file header length")
+	}
+
+	// magic header
+	if !bytes.Equal(headerMagic, data[:6]) {
+		return errHeaderMagic
+	}
+
+	// checksum
+	crc := crc32.NewIEEE()
+	crc.Write(data[6:8])
+	if uint32LE(data[8:]) != crc.Sum32() {
+		return errors.New("xz: invalid checksum for file header")
+	}
+
+	// stream flags
+	if data[6] != 0 {
+		return errInvalidFlags
+	}
+	flags := data[7]
+	if err := verifyFlags(flags); err != nil {
+		return err
+	}
+
+	h.flags = flags
+	return nil
+}
+
+// MarshalBinary generates the xz file header.
+func (h *header) MarshalBinary() (data []byte, err error) {
+	if err = verifyFlags(h.flags); err != nil {
+		return nil, err
+	}
+
+	data = make([]byte, 12)
+	copy(data, headerMagic)
+	data[7] = h.flags
+
+	crc := crc32.NewIEEE()
+	crc.Write(data[6:8])
+	putUint32LE(data[8:], crc.Sum32())
+
+	return data, nil
+}
+
+/*** Footer ***/
+
+// footerLen defines the length of the footer.
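+// The twelve bytes are a CRC-32 over the backward size and stream flags,
+// the 4-byte backward size, the 2-byte stream flags and the magic "YZ".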
+const footerLen = 12 + +// footerMagic contains the footer magic bytes. +var footerMagic = []byte{'Y', 'Z'} + +// footer represents the content of the xz file footer. +type footer struct { + indexSize int64 + flags byte +} + +// String prints a string representation of the footer structure. +func (f footer) String() string { + return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize) +} + +// Minimum and maximum for the size of the index (backward size). +const ( + minIndexSize = 4 + maxIndexSize = (1 << 32) * 4 +) + +// MarshalBinary converts footer values into an xz file footer. Note +// that the footer value is checked for correctness. +func (f *footer) MarshalBinary() (data []byte, err error) { + if err = verifyFlags(f.flags); err != nil { + return nil, err + } + if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) { + return nil, errors.New("xz: index size out of range") + } + if f.indexSize%4 != 0 { + return nil, errors.New( + "xz: index size not aligned to four bytes") + } + + data = make([]byte, footerLen) + + // backward size (index size) + s := (f.indexSize / 4) - 1 + putUint32LE(data[4:], uint32(s)) + // flags + data[9] = f.flags + // footer magic + copy(data[10:], footerMagic) + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + putUint32LE(data, crc.Sum32()) + + return data, nil +} + +// UnmarshalBinary sets the footer value by unmarshalling an xz file +// footer. +func (f *footer) UnmarshalBinary(data []byte) error { + if len(data) != footerLen { + return errors.New("xz: wrong footer length") + } + + // magic bytes + if !bytes.Equal(data[10:], footerMagic) { + return errors.New("xz: footer magic invalid") + } + + // CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[4:10]) + if uint32LE(data) != crc.Sum32() { + return errors.New("xz: footer checksum error") + } + + var g footer + // backward size (index size) + g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4 + + // flags + if data[8] != 0 { + return errInvalidFlags + } + g.flags = data[9] + if err := verifyFlags(g.flags); err != nil { + return err + } + + *f = g + return nil +} + +/*** Block Header ***/ + +// blockHeader represents the content of an xz block header. +type blockHeader struct { + compressedSize int64 + uncompressedSize int64 + filters []filter +} + +// String converts the block header into a string. +func (h blockHeader) String() string { + var buf bytes.Buffer + first := true + if h.compressedSize >= 0 { + fmt.Fprintf(&buf, "compressed size %d", h.compressedSize) + first = false + } + if h.uncompressedSize >= 0 { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize) + first = false + } + for _, f := range h.filters { + if !first { + buf.WriteString(" ") + } + fmt.Fprintf(&buf, "filter %s", f) + first = false + } + return buf.String() +} + +// Masks for the block flags. +const ( + filterCountMask = 0x03 + compressedSizePresent = 0x40 + uncompressedSizePresent = 0x80 + reservedBlockFlags = 0x3C +) + +// errIndexIndicator signals that an index indicator (0x00) has been found +// instead of an expected block header indicator. +var errIndexIndicator = errors.New("xz: found index indicator") + +// readBlockHeader reads the block header. 
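+// The first byte encodes the header size: the whole header occupies
+// (size+1)*4 bytes. A zero first byte marks the start of the index instead.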
+func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) { + var buf bytes.Buffer + buf.Grow(20) + + // block header size + z, err := io.CopyN(&buf, r, 1) + n = int(z) + if err != nil { + return nil, n, err + } + s := buf.Bytes()[0] + if s == 0 { + return nil, n, errIndexIndicator + } + + // read complete header + headerLen := (int(s) + 1) * 4 + buf.Grow(headerLen - 1) + z, err = io.CopyN(&buf, r, int64(headerLen-1)) + n += int(z) + if err != nil { + return nil, n, err + } + + // unmarshal block header + h = new(blockHeader) + if err = h.UnmarshalBinary(buf.Bytes()); err != nil { + return nil, n, err + } + + return h, n, nil +} + +// readSizeInBlockHeader reads the uncompressed or compressed size +// fields in the block header. The present value informs the function +// whether the respective field is actually present in the header. +func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) { + if !present { + return -1, nil + } + x, _, err := readUvarint(r) + if err != nil { + return 0, err + } + if x >= 1<<63 { + return 0, errors.New("xz: size overflow in block header") + } + return int64(x), nil +} + +// UnmarshalBinary unmarshals the block header. +func (h *blockHeader) UnmarshalBinary(data []byte) error { + // Check header length + s := data[0] + if data[0] == 0 { + return errIndexIndicator + } + headerLen := (int(s) + 1) * 4 + if len(data) != headerLen { + return fmt.Errorf("xz: data length %d; want %d", len(data), + headerLen) + } + n := headerLen - 4 + + // Check CRC-32 + crc := crc32.NewIEEE() + crc.Write(data[:n]) + if crc.Sum32() != uint32LE(data[n:]) { + return errors.New("xz: checksum error for block header") + } + + // Block header flags + flags := data[1] + if flags&reservedBlockFlags != 0 { + return errors.New("xz: reserved block header flags set") + } + + r := bytes.NewReader(data[2:n]) + + // Compressed size + var err error + h.compressedSize, err = readSizeInBlockHeader( + r, flags&compressedSizePresent != 0) + if err != nil { + return err + } + + // Uncompressed size + h.uncompressedSize, err = readSizeInBlockHeader( + r, flags&uncompressedSizePresent != 0) + if err != nil { + return err + } + + h.filters, err = readFilters(r, int(flags&filterCountMask)+1) + if err != nil { + return err + } + + // Check padding + // Since headerLen is a multiple of 4 we don't need to check + // alignment. + k := r.Len() + // The standard spec says that the padding should have not more + // than 3 bytes. However we found paddings of 4 or 5 in the + // wild. See https://github.com/ulikunitz/xz/pull/11 and + // https://github.com/ulikunitz/xz/issues/15 + // + // The only reasonable approach seems to be to ignore the + // padding size. We still check that all padding bytes are zero. + if !allZeros(data[n-k : n]) { + return errPadding + } + return nil +} + +// MarshalBinary marshals the binary header. 
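+// The LZMA2 filter must be the last filter; the header is padded to a
+// multiple of four bytes before the CRC-32 checksum is appended.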
+func (h *blockHeader) MarshalBinary() (data []byte, err error) { + if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) { + return nil, errors.New("xz: filter count wrong") + } + for i, f := range h.filters { + if i < len(h.filters)-1 { + if f.id() == lzmaFilterID { + return nil, errors.New( + "xz: LZMA2 filter is not the last") + } + } else { + // last filter + if f.id() != lzmaFilterID { + return nil, errors.New("xz: " + + "last filter must be the LZMA2 filter") + } + } + } + + var buf bytes.Buffer + // header size must set at the end + buf.WriteByte(0) + + // flags + flags := byte(len(h.filters) - 1) + if h.compressedSize >= 0 { + flags |= compressedSizePresent + } + if h.uncompressedSize >= 0 { + flags |= uncompressedSizePresent + } + buf.WriteByte(flags) + + p := make([]byte, 10) + if h.compressedSize >= 0 { + k := putUvarint(p, uint64(h.compressedSize)) + buf.Write(p[:k]) + } + if h.uncompressedSize >= 0 { + k := putUvarint(p, uint64(h.uncompressedSize)) + buf.Write(p[:k]) + } + + for _, f := range h.filters { + fp, err := f.MarshalBinary() + if err != nil { + return nil, err + } + buf.Write(fp) + } + + // padding + for i := padLen(int64(buf.Len())); i > 0; i-- { + buf.WriteByte(0) + } + + // crc place holder + buf.Write(p[:4]) + + data = buf.Bytes() + if len(data)%4 != 0 { + panic("data length not aligned") + } + s := len(data)/4 - 1 + if !(1 < s && s <= 255) { + panic("wrong block header size") + } + data[0] = byte(s) + + crc := crc32.NewIEEE() + crc.Write(data[:len(data)-4]) + putUint32LE(data[len(data)-4:], crc.Sum32()) + + return data, nil +} + +// Constants used for marshalling and unmarshalling filters in the xz +// block header. +const ( + minFilters = 1 + maxFilters = 4 + minReservedID = 1 << 62 +) + +// filter represents a filter in the block header. +type filter interface { + id() uint64 + UnmarshalBinary(data []byte) error + MarshalBinary() (data []byte, err error) + reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error) + writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error) + // filter must be last filter + last() bool +} + +// readFilter reads a block filter from the block header. At this point +// in time only the LZMA2 filter is supported. +func readFilter(r io.Reader) (f filter, err error) { + br := lzma.ByteReader(r) + + // index + id, _, err := readUvarint(br) + if err != nil { + return nil, err + } + + var data []byte + switch id { + case lzmaFilterID: + data = make([]byte, lzmaFilterLen) + data[0] = lzmaFilterID + if _, err = io.ReadFull(r, data[1:]); err != nil { + return nil, err + } + f = new(lzmaFilter) + default: + if id >= minReservedID { + return nil, errors.New( + "xz: reserved filter id in block stream header") + } + return nil, errors.New("xz: invalid filter id") + } + if err = f.UnmarshalBinary(data); err != nil { + return nil, err + } + return f, err +} + +// readFilters reads count filters. At this point in time only the count +// 1 is supported. +func readFilters(r io.Reader, count int) (filters []filter, err error) { + if count != 1 { + return nil, errors.New("xz: unsupported filter count") + } + f, err := readFilter(r) + if err != nil { + return nil, err + } + return []filter{f}, err +} + +/*** Index ***/ + +// record describes a block in the xz file index. +type record struct { + unpaddedSize int64 + uncompressedSize int64 +} + +// readRecord reads an index record. 
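+// A record is encoded as two uvarints: the unpadded size followed by the
+// uncompressed size of the block.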
+func readRecord(r io.ByteReader) (rec record, n int, err error) { + u, k, err := readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.unpaddedSize = int64(u) + if rec.unpaddedSize < 0 { + return rec, n, errors.New("xz: unpadded size negative") + } + + u, k, err = readUvarint(r) + n += k + if err != nil { + return rec, n, err + } + rec.uncompressedSize = int64(u) + if rec.uncompressedSize < 0 { + return rec, n, errors.New("xz: uncompressed size negative") + } + + return rec, n, nil +} + +// MarshalBinary converts an index record in its binary encoding. +func (rec *record) MarshalBinary() (data []byte, err error) { + // maximum length of a uvarint is 10 + p := make([]byte, 20) + n := putUvarint(p, uint64(rec.unpaddedSize)) + n += putUvarint(p[n:], uint64(rec.uncompressedSize)) + return p[:n], nil +} + +// writeIndex writes the index, a sequence of records. +func writeIndex(w io.Writer, index []record) (n int64, err error) { + crc := crc32.NewIEEE() + mw := io.MultiWriter(w, crc) + + // index indicator + k, err := mw.Write([]byte{0}) + n += int64(k) + if err != nil { + return n, err + } + + // number of records + p := make([]byte, 10) + k = putUvarint(p, uint64(len(index))) + k, err = mw.Write(p[:k]) + n += int64(k) + if err != nil { + return n, err + } + + // list of records + for _, rec := range index { + p, err := rec.MarshalBinary() + if err != nil { + return n, err + } + k, err = mw.Write(p) + n += int64(k) + if err != nil { + return n, err + } + } + + // index padding + k, err = mw.Write(make([]byte, padLen(int64(n)))) + n += int64(k) + if err != nil { + return n, err + } + + // crc32 checksum + putUint32LE(p, crc.Sum32()) + k, err = w.Write(p[:4]) + n += int64(k) + + return n, err +} + +// readIndexBody reads the index from the reader. It assumes that the +// index indicator has already been read. 
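+// The body holds the record count as a uvarint, the records themselves,
+// zero padding to four-byte alignment and a CRC-32 that also covers the
+// indicator byte.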
+func readIndexBody(r io.Reader, expectedRecordLen int) (records []record, n int64, err error) { + crc := crc32.NewIEEE() + // index indicator + crc.Write([]byte{0}) + + br := lzma.ByteReader(io.TeeReader(r, crc)) + + // number of records + u, k, err := readUvarint(br) + n += int64(k) + if err != nil { + return nil, n, err + } + recLen := int(u) + if recLen < 0 || uint64(recLen) != u { + return nil, n, errors.New("xz: record number overflow") + } + if recLen != expectedRecordLen { + return nil, n, fmt.Errorf( + "xz: index length is %d; want %d", + recLen, expectedRecordLen) + } + + // list of records + records = make([]record, recLen) + for i := range records { + records[i], k, err = readRecord(br) + n += int64(k) + if err != nil { + return nil, n, err + } + } + + p := make([]byte, padLen(int64(n+1)), 4) + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return nil, n, err + } + if !allZeros(p) { + return nil, n, errors.New("xz: non-zero byte in index padding") + } + + // crc32 + s := crc.Sum32() + p = p[:4] + k, err = io.ReadFull(br.(io.Reader), p) + n += int64(k) + if err != nil { + return records, n, err + } + if uint32LE(p) != s { + return nil, n, errors.New("xz: wrong checksum for index") + } + + return records, n, nil +} diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz new file mode 100644 index 0000000000..46043f7dc8 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox-check-none.xz differ diff --git a/vendor/github.com/ulikunitz/xz/fox.xz b/vendor/github.com/ulikunitz/xz/fox.xz new file mode 100644 index 0000000000..4b820bd5a1 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/fox.xz differ diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go new file mode 100644 index 0000000000..dae159db50 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -0,0 +1,181 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hash + +// CyclicPoly provides a cyclic polynomial rolling hash. +type CyclicPoly struct { + h uint64 + p []uint64 + i int +} + +// ror rotates the unsigned 64-bit integer to right. The argument s must be +// less than 64. +func ror(x uint64, s uint) uint64 { + return (x >> s) | (x << (64 - s)) +} + +// NewCyclicPoly creates a new instance of the CyclicPoly structure. The +// argument n gives the number of bytes for which a hash will be executed. +// This number must be positive; the method panics if this isn't the case. +func NewCyclicPoly(n int) *CyclicPoly { + if n < 1 { + panic("argument n must be positive") + } + return &CyclicPoly{p: make([]uint64, 0, n)} +} + +// Len returns the length of the byte sequence for which a hash is generated. +func (r *CyclicPoly) Len() int { + return cap(r.p) +} + +// RollByte hashes the next byte and returns a hash value. The complete becomes +// available after at least Len() bytes have been hashed. +func (r *CyclicPoly) RollByte(x byte) uint64 { + y := hash[x] + if len(r.p) < cap(r.p) { + r.h = ror(r.h, 1) ^ y + r.p = append(r.p, y) + } else { + r.h ^= ror(r.p[r.i], uint(cap(r.p)-1)) + r.h = ror(r.h, 1) ^ y + r.p[r.i] = y + r.i = (r.i + 1) % cap(r.p) + } + return r.h +} + +// Stores the hash for the individual bytes. 
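+// Each byte value maps to a fixed random 64-bit value; RollByte mixes these
+// table entries into the rolling hash instead of the raw byte values.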
+var hash = [256]uint64{ + 0x2e4fc3f904065142, 0xc790984cfbc99527, + 0x879f95eb8c62f187, 0x3b61be86b5021ef2, + 0x65a896a04196f0a5, 0xc5b307b80470b59e, + 0xd3bff376a70df14b, 0xc332f04f0b3f1701, + 0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53, + 0x1906a10c2c1c0208, 0xfb0c712a03421c0d, + 0x38be311a65c9552b, 0xfee7ee4ca6445c7e, + 0x71aadeded184f21e, 0xd73426fccda23b2d, + 0x29773fb5fb9600b5, 0xce410261cd32981a, + 0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c, + 0xc13e35fc9c73a887, 0xf30ed5c201e76dbc, + 0xa5f10b3910482cea, 0x2945d59be02dfaad, + 0x06ee334ff70571b5, 0xbabf9d8070f44380, + 0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7, + 0x26183cb9f7b1664c, 0xea71dac7da068f21, + 0xea92eca5bd1d0bb7, 0x415595862defcd75, + 0x248a386023c60648, 0x9cf021ab284b3c8a, + 0xfc9372df02870f6c, 0x2b92d693eeb3b3fc, + 0x73e799d139dc6975, 0x7b15ae312486363c, + 0xb70e5454a2239c80, 0x208e3fb31d3b2263, + 0x01f563cabb930f44, 0x2ac4533d2a3240d8, + 0x84231ed1064f6f7c, 0xa9f020977c2a6d19, + 0x213c227271c20122, 0x09fe8a9a0a03d07a, + 0x4236dc75bcaf910c, 0x460a8b2bead8f17e, + 0xd9b27be1aa07055f, 0xd202d5dc4b11c33e, + 0x70adb010543bea12, 0xcdae938f7ea6f579, + 0x3f3d870208672f4d, 0x8e6ccbce9d349536, + 0xe4c0871a389095ae, 0xf5f2a49152bca080, + 0x9a43f9b97269934e, 0xc17b3753cb6f475c, + 0xd56d941e8e206bd4, 0xac0a4f3e525eda00, + 0xa06d5a011912a550, 0x5537ed19537ad1df, + 0xa32fe713d611449d, 0x2a1d05b47c3b579f, + 0x991d02dbd30a2a52, 0x39e91e7e28f93eb0, + 0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97, + 0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44, + 0x0b63d5d801708420, 0x8f227ca8f37ffaec, + 0x0256278670887c24, 0x107e14877dbf540b, + 0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61, + 0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001, + 0x31f601d5d31c48c4, 0x72ff3c0928bcaec7, + 0xd99264421147eb03, 0x535a2d6d38aefcfe, + 0x6ba8b4454a916237, 0xfa39366eaae4719c, + 0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4, + 0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8, + 0xd61c2503fe639144, 0x30ce625441eb92d3, + 0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5, + 0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf, + 0xc7ea4872c96b83ae, 0x6dd5d376f4392382, + 0x1be88681aaa9792f, 0xfef465ee1b6c10d9, + 0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9, + 0x7808e902b3857d0b, 0x171c9c4ea4607972, + 0x58d66274850146df, 0x42b311c10d3981d1, + 0x647fa8c621c41a4c, 0xf472771c66ddfedc, + 0x338d27e3f847b46b, 0x6402ce3da97545ce, + 0x5162db616fc38638, 0x9c83be97bc22a50e, + 0x2d3d7478a78d5e72, 0xe621a9b938fd5397, + 0x9454614eb0f81c45, 0x395fb6e742ed39b6, + 0x77dd9179d06037bf, 0xc478d0fee4d2656d, + 0x35d9d6cb772007af, 0x83a56e92c883f0f6, + 0x27937453250c00a1, 0x27bd6ebc3a46a97d, + 0x9f543bf784342d51, 0xd158f38c48b0ed52, + 0x8dd8537c045f66b4, 0x846a57230226f6d5, + 0x6b13939e0c4e7cdf, 0xfca25425d8176758, + 0x92e5fc6cd52788e6, 0x9992e13d7a739170, + 0x518246f7a199e8ea, 0xf104c2a71b9979c7, + 0x86b3ffaabea4768f, 0x6388061cf3e351ad, + 0x09d9b5295de5bbb5, 0x38bf1638c2599e92, + 0x1d759846499e148d, 0x4c0ff015e5f96ef4, + 0xa41a94cfa270f565, 0x42d76f9cb2326c0b, + 0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a, + 0x337523aabbe6cf8d, 0x646bb14001d42b12, + 0xc178729d138adc74, 0xf900ef4491f24086, + 0xee1a90d334bb5ac4, 0x9755c92247301a50, + 0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9, + 0x0fa8084cf91ac6ff, 0x10d226cf136e6189, + 0xd302057a07d4fb21, 0x5f03800e20a0fcc3, + 0x80118d4ae46bd210, 0x58ab61a522843733, + 0x51edd575c5432a4b, 0x94ee6ff67f9197f7, + 0x765669e0e5e8157b, 0xa5347830737132f0, + 0x3ba485a69f01510c, 0x0b247d7b957a01c3, + 0x1b3d63449fd807dc, 0x0fdc4721c30ad743, + 0x8b535ed3829b2b14, 0xee41d0cad65d232c, + 0xe6a99ed97a6a982f, 0x65ac6194c202003d, + 0x692accf3a70573eb, 0xcc3c02c3e200d5af, + 
0x0d419e8b325914a3, 0x320f160f42c25e40,
+	0x00710d647a51fe7a, 0x3c947692330aed60,
+	0x9288aa280d355a7a, 0xa1806a9b791d1696,
+	0x5d60e38496763da1, 0x6c69e22e613fd0f4,
+	0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
+	0x460c17992cbaece1, 0xf7822c5444d3297f,
+	0x344a9790c69b74aa, 0xb80a42e6cae09dce,
+	0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
+	0x88e0b7be347627cc, 0x45246009b7a99490,
+	0x8011c6dd3fe50472, 0xc341d682bffb99d7,
+	0x2511be93808e2d15, 0xd5bc13d7fd739840,
+	0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
+	0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
+	0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
+	0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
+	0xa559cce0d9199aac, 0xde39d47ef3723380,
+	0xe5b69d848ce42e35, 0xefa24296f8e79f52,
+	0x70190b59db9a5afc, 0x26f166cdb211e7bf,
+	0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
+	0xb9059b05e9420d90, 0x2f0da855c9388754,
+	0x611d5e9ab77949cc, 0x2912038ac01163f4,
+	0x0231df50402b2fba, 0x45660fc4f3245f58,
+	0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
+	0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
+	0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
+	0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
+	0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
+	0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
+	0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
+	0x6d1b3c1149dda943, 0x372c943a518c1093,
+	0xad27af45e77c09c4, 0x3b6f92b646044604,
+	0xac2917909f5fcf4f, 0x2069a60e977e5557,
+	0x353a469e71014de5, 0x24be356281f55c15,
+	0x2b6d710ba8e9adea, 0x404ad1751c749c29,
+	0xed7311bf23d7f185, 0xba4f6976b4acc43e,
+	0x32d7198d2bc39000, 0xee667019014d6e01,
+	0x494ef3e128d14c83, 0x1f95a152baecd6be,
+	0x201648dff1f483a5, 0x68c28550c8384af6,
+	0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
+	0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
+	0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
+	0xf8f6b97f5585080a, 0x74236084be57b95b,
+	0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
+	0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
new file mode 100644
index 0000000000..b4cf8b75e4
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go
@@ -0,0 +1,14 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package hash provides rolling hashes.
+
+Rolling hashes have to be used for maintaining the positions of n-byte
+sequences in the dictionary buffer.
+
+The package currently provides the Rabin-Karp rolling hash and a Cyclic
+Polynomial hash. Both support the Hashes method to be used with an interface.
+*/
+package hash
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
new file mode 100644
index 0000000000..5322342ee3
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
@@ -0,0 +1,66 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// A is the default constant for the Rabin-Karp rolling hash. This is a random
+// prime.
+const A = 0x97b548add41d5da1
+
+// RabinKarp supports the computation of a rolling hash.
+type RabinKarp struct {
+	A uint64
+	// a^n
+	aOldest uint64
+	h       uint64
+	p       []byte
+	i       int
+}
+
+// NewRabinKarp creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The default constant will be
+// used.
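+// The rolling hash maintains h = p[0]*A^n + p[1]*A^(n-1) + ... + p[n-1]*A,
+// so rolling the window forward costs one subtraction of p[0]*aOldest, one
+// addition and one multiplication per byte.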
+func NewRabinKarp(n int) *RabinKarp {
+	return NewRabinKarpConst(n, A)
+}
+
+// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
+// length of the byte sequence to be hashed. The argument a provides the
+// constant used to compute the hash.
+func NewRabinKarpConst(n int, a uint64) *RabinKarp {
+	if n <= 0 {
+		panic("number of bytes n must be positive")
+	}
+	aOldest := uint64(1)
+	// There are faster methods. For the small n required by the LZMA
+	// compressor O(n) is sufficient.
+	for i := 0; i < n; i++ {
+		aOldest *= a
+	}
+	return &RabinKarp{
+		A: a, aOldest: aOldest,
+		p: make([]byte, 0, n),
+	}
+}
+
+// Len returns the length of the byte sequence.
+func (r *RabinKarp) Len() int {
+	return cap(r.p)
+}
+
+// RollByte computes the hash after x has been added.
+func (r *RabinKarp) RollByte(x byte) uint64 {
+	if len(r.p) < cap(r.p) {
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p = append(r.p, x)
+	} else {
+		r.h -= uint64(r.p[r.i]) * r.aOldest
+		r.h += uint64(x)
+		r.h *= r.A
+		r.p[r.i] = x
+		r.i = (r.i + 1) % cap(r.p)
+	}
+	return r.h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
new file mode 100644
index 0000000000..a989833561
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go
@@ -0,0 +1,29 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hash
+
+// Roller provides an interface for rolling hashes. The hash value will become
+// valid after RollByte has been called Len times.
+type Roller interface {
+	Len() int
+	RollByte(x byte) uint64
+}
+
+// Hashes computes all hash values for the array p. Note that the state of the
+// roller is changed.
+func Hashes(r Roller, p []byte) []uint64 {
+	n := r.Len()
+	if len(p) < n {
+		return nil
+	}
+	h := make([]uint64, len(p)-n+1)
+	for i := 0; i < n-1; i++ {
+		r.RollByte(p[i])
+	}
+	for i := range h {
+		h[i] = r.RollByte(p[i+n-1])
+	}
+	return h
+}
diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
new file mode 100644
index 0000000000..f4627ea113
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
@@ -0,0 +1,456 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xlog provides a simple logging package that allows disabling
+// certain message categories. It defines a type, Logger, with multiple
+// methods for formatting output. The package also has a predefined
+// 'standard' Logger accessible through the helper functions Print[f|ln],
+// Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln] that are easier
+// to use than creating a Logger manually. That logger writes to standard
+// error and prints the date and time of each logged message, which can
+// be configured using the function SetFlags.
+//
+// The Fatal functions call os.Exit(1) after the message is output
+// unless suppressed by the flags. The Panic functions call panic
+// after writing the log message unless suppressed.
+package xlog
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+// The flags define what information is prefixed to each log entry
+// generated by the Logger. The Lno* versions allow the suppression of
+// specific output.
The bits are or'ed together to control what will be +// printed. There is no control over the order of the items printed and +// the format. The full format is: +// +// 2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message +const ( + Ldate = 1 << iota // the date: 2009-01-23 + Ltime // the time: 01:23:23 + Lmicroseconds // microsecond resolution: 01:23:23.123123 + Llongfile // full file name and line number: /a/b/c/d.go:23 + Lshortfile // final file name element and line number: d.go:23 + Lnopanic // suppresses output from Panic[f|ln] but not the panic call + Lnofatal // suppresses output from Fatal[f|ln] but not the exit + Lnowarn // suppresses output from Warn[f|ln] + Lnoprint // suppresses output from Print[f|ln] + Lnodebug // suppresses output from Debug[f|ln] + // initial values for the standard logger + Lstdflags = Ldate | Ltime | Lnodebug +) + +// A Logger represents an active logging object that generates lines of +// output to an io.Writer. Each logging operation if not suppressed +// makes a single call to the Writer's Write method. A Logger can be +// used simultaneously from multiple goroutines; it guarantees to +// serialize access to the Writer. +type Logger struct { + mu sync.Mutex // ensures atomic writes; and protects the following + // fields + prefix string // prefix to write at beginning of each line + flag int // properties + out io.Writer // destination for output + buf []byte // for accumulating text to write +} + +// New creates a new Logger. The out argument sets the destination to +// which the log output will be written. The prefix appears at the +// beginning of each log line. The flag argument defines the logging +// properties. +func New(out io.Writer, prefix string, flag int) *Logger { + return &Logger{out: out, prefix: prefix, flag: flag} +} + +// std is the standard logger used by the package scope functions. +var std = New(os.Stderr, "", Lstdflags) + +// itoa converts the integer to ASCII. A negative widths will avoid +// zero-padding. The function supports only non-negative integers. +func itoa(buf *[]byte, i int, wid int) { + var u = uint(i) + if u == 0 && wid <= 1 { + *buf = append(*buf, '0') + return + } + var b [32]byte + bp := len(b) + for ; u > 0 || wid > 0; u /= 10 { + bp-- + wid-- + b[bp] = byte(u%10) + '0' + } + *buf = append(*buf, b[bp:]...) +} + +// formatHeader puts the header into the buf field of the buffer. +func (l *Logger) formatHeader(t time.Time, file string, line int) { + l.buf = append(l.buf, l.prefix...) + if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { + if l.flag&Ldate != 0 { + year, month, day := t.Date() + itoa(&l.buf, year, 4) + l.buf = append(l.buf, '-') + itoa(&l.buf, int(month), 2) + l.buf = append(l.buf, '-') + itoa(&l.buf, day, 2) + l.buf = append(l.buf, ' ') + } + if l.flag&(Ltime|Lmicroseconds) != 0 { + hour, min, sec := t.Clock() + itoa(&l.buf, hour, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, min, 2) + l.buf = append(l.buf, ':') + itoa(&l.buf, sec, 2) + if l.flag&Lmicroseconds != 0 { + l.buf = append(l.buf, '.') + itoa(&l.buf, t.Nanosecond()/1e3, 6) + } + l.buf = append(l.buf, ' ') + } + } + if l.flag&(Lshortfile|Llongfile) != 0 { + if l.flag&Lshortfile != 0 { + short := file + for i := len(file) - 1; i > 0; i-- { + if file[i] == '/' { + short = file[i+1:] + break + } + } + file = short + } + l.buf = append(l.buf, file...) + l.buf = append(l.buf, ':') + itoa(&l.buf, line, -1) + l.buf = append(l.buf, ": "...) 
+ } +} + +func (l *Logger) output(calldepth int, now time.Time, s string) error { + var file string + var line int + if l.flag&(Lshortfile|Llongfile) != 0 { + l.mu.Unlock() + var ok bool + _, file, line, ok = runtime.Caller(calldepth) + if !ok { + file = "???" + line = 0 + } + l.mu.Lock() + } + l.buf = l.buf[:0] + l.formatHeader(now, file, line) + l.buf = append(l.buf, s...) + if len(s) == 0 || s[len(s)-1] != '\n' { + l.buf = append(l.buf, '\n') + } + _, err := l.out.Write(l.buf) + return err +} + +// Output writes the string s with the header controlled by the flags to +// the l.out writer. A newline will be appended if s doesn't end in a +// newline. Calldepth is used to recover the PC, although all current +// calls of Output use the call depth 2. Access to the function is serialized. +func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprint(v...) + return l.output(calldepth+1, now, s) +} + +// Outputf works like output but formats the output like Printf. +func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintf(format, v...) + return l.output(calldepth+1, now, s) +} + +// Outputln works like output but formats the output like Println. +func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error { + now := time.Now() + l.mu.Lock() + defer l.mu.Unlock() + if l.flag&noflag != 0 { + return nil + } + s := fmt.Sprintln(v...) + return l.output(calldepth+1, now, s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panic(v ...interface{}) { + l.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panic prints the message like Print and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panic(v ...interface{}) { + std.Output(2, Lnopanic, v...) + s := fmt.Sprint(v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicf(format string, v ...interface{}) { + l.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicf prints the message like Printf and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicf(format string, v ...interface{}) { + std.Outputf(2, Lnopanic, format, v...) + s := fmt.Sprintf(format, v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func (l *Logger) Panicln(v ...interface{}) { + l.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Panicln prints the message like Println and calls panic. The printing +// might be suppressed by the flag Lnopanic. +func Panicln(v ...interface{}) { + std.Outputln(2, Lnopanic, v...) + s := fmt.Sprintln(v...) + panic(s) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatal(v ...interface{}) { + l.Output(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatal prints the message like Print and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatal(v ...interface{}) { + std.Output(2, Lnofatal, v...) 
+ os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalf(format string, v ...interface{}) { + l.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalf prints the message like Printf and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalf(format string, v ...interface{}) { + std.Outputf(2, Lnofatal, format, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func (l *Logger) Fatalln(format string, v ...interface{}) { + l.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Fatalln prints the message like Println and calls os.Exit(1). The +// printing might be suppressed by the flag Lnofatal. +func Fatalln(format string, v ...interface{}) { + std.Outputln(2, Lnofatal, v...) + os.Exit(1) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warn(v ...interface{}) { + l.Output(2, Lnowarn, v...) +} + +// Warn prints the message like Print. The printing might be suppressed +// by the flag Lnowarn. +func Warn(v ...interface{}) { + std.Output(2, Lnowarn, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnf(format string, v ...interface{}) { + l.Outputf(2, Lnowarn, format, v...) +} + +// Warnf prints the message like Printf. The printing might be suppressed +// by the flag Lnowarn. +func Warnf(format string, v ...interface{}) { + std.Outputf(2, Lnowarn, format, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func (l *Logger) Warnln(v ...interface{}) { + l.Outputln(2, Lnowarn, v...) +} + +// Warnln prints the message like Println. The printing might be suppressed +// by the flag Lnowarn. +func Warnln(v ...interface{}) { + std.Outputln(2, Lnowarn, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Print(v ...interface{}) { + l.Output(2, Lnoprint, v...) +} + +// Print prints the message like fmt.Print. The printing might be suppressed +// by the flag Lnoprint. +func Print(v ...interface{}) { + std.Output(2, Lnoprint, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func (l *Logger) Printf(format string, v ...interface{}) { + l.Outputf(2, Lnoprint, format, v...) +} + +// Printf prints the message like fmt.Printf. The printing might be suppressed +// by the flag Lnoprint. +func Printf(format string, v ...interface{}) { + std.Outputf(2, Lnoprint, format, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func (l *Logger) Println(v ...interface{}) { + l.Outputln(2, Lnoprint, v...) +} + +// Println prints the message like fmt.Println. The printing might be +// suppressed by the flag Lnoprint. +func Println(v ...interface{}) { + std.Outputln(2, Lnoprint, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debug(v ...interface{}) { + l.Output(2, Lnodebug, v...) +} + +// Debug prints the message like Print. The printing might be suppressed +// by the flag Lnodebug. +func Debug(v ...interface{}) { + std.Output(2, Lnodebug, v...) 
+} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugf(format string, v ...interface{}) { + l.Outputf(2, Lnodebug, format, v...) +} + +// Debugf prints the message like Printf. The printing might be suppressed +// by the flag Lnodebug. +func Debugf(format string, v ...interface{}) { + std.Outputf(2, Lnodebug, format, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func (l *Logger) Debugln(v ...interface{}) { + l.Outputln(2, Lnodebug, v...) +} + +// Debugln prints the message like Println. The printing might be suppressed +// by the flag Lnodebug. +func Debugln(v ...interface{}) { + std.Outputln(2, Lnodebug, v...) +} + +// Flags returns the current flags used by the logger. +func (l *Logger) Flags() int { + l.mu.Lock() + defer l.mu.Unlock() + return l.flag +} + +// Flags returns the current flags used by the standard logger. +func Flags() int { + return std.Flags() +} + +// SetFlags sets the flags of the logger. +func (l *Logger) SetFlags(flag int) { + l.mu.Lock() + defer l.mu.Unlock() + l.flag = flag +} + +// SetFlags sets the flags for the standard logger. +func SetFlags(flag int) { + std.SetFlags(flag) +} + +// Prefix returns the prefix used by the logger. +func (l *Logger) Prefix() string { + l.mu.Lock() + defer l.mu.Unlock() + return l.prefix +} + +// Prefix returns the prefix used by the standard logger of the package. +func Prefix() string { + return std.Prefix() +} + +// SetPrefix sets the prefix for the logger. +func (l *Logger) SetPrefix(prefix string) { + l.mu.Lock() + defer l.mu.Unlock() + l.prefix = prefix +} + +// SetPrefix sets the prefix of the standard logger of the package. +func SetPrefix(prefix string) { + std.SetPrefix(prefix) +} + +// SetOutput sets the output of the logger. +func (l *Logger) SetOutput(w io.Writer) { + l.mu.Lock() + defer l.mu.Unlock() + l.out = w +} + +// SetOutput sets the output for the standard logger of the package. +func SetOutput(w io.Writer) { + std.SetOutput(w) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go new file mode 100644 index 0000000000..2b39da6f75 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -0,0 +1,522 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "unicode" +) + +// node represents a node in the binary tree. +type node struct { + // x is the search value + x uint32 + // p parent node + p uint32 + // l left child + l uint32 + // r right child + r uint32 +} + +// wordLen is the number of bytes represented by the v field of a node. +const wordLen = 4 + +// binTree supports the identification of the next operation based on a +// binary tree. +// +// Nodes will be identified by their index into the ring buffer. +type binTree struct { + dict *encoderDict + // ring buffer of nodes + node []node + // absolute offset of the entry for the next node. Position 4 + // byte larger. + hoff int64 + // front position in the node ring buffer + front uint32 + // index of the root node + root uint32 + // current x value + x uint32 + // preallocated array + data []byte +} + +// null represents the nonexistent index. We can't use zero because it +// would always exist or we would need to decrease the index for each +// reference. 
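// [Editor's sketch, not part of the vendored source] Child and parent links
// are ring-buffer indices, so index 0 denotes a real node and cannot double
// as a nil marker; the all-ones value below is free because newBinTree
// rejects capacities of 1<<32 - 1 or more. An equivalent spelling:
//
//	const null = ^uint32(0) // == 1<<32 - 1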
+const null uint32 = 1<<32 - 1 + +// newBinTree initializes the binTree structure. The capacity defines +// the size of the buffer and defines the maximum distance for which +// matches will be found. +func newBinTree(capacity int) (t *binTree, err error) { + if capacity < 1 { + return nil, errors.New( + "newBinTree: capacity must be larger than zero") + } + if int64(capacity) >= int64(null) { + return nil, errors.New( + "newBinTree: capacity must less 2^{32}-1") + } + t = &binTree{ + node: make([]node, capacity), + hoff: -int64(wordLen), + root: null, + data: make([]byte, maxMatchLen), + } + return t, nil +} + +func (t *binTree) SetDict(d *encoderDict) { t.dict = d } + +// WriteByte writes a single byte into the binary tree. +func (t *binTree) WriteByte(c byte) error { + t.x = (t.x << 8) | uint32(c) + t.hoff++ + if t.hoff < 0 { + return nil + } + v := t.front + if int64(v) < t.hoff { + // We are overwriting old nodes stored in the tree. + t.remove(v) + } + t.node[v].x = t.x + t.add(v) + t.front++ + if int64(t.front) >= int64(len(t.node)) { + t.front = 0 + } + return nil +} + +// Writes writes a sequence of bytes into the binTree structure. +func (t *binTree) Write(p []byte) (n int, err error) { + for _, c := range p { + t.WriteByte(c) + } + return len(p), nil +} + +// add puts the node v into the tree. The node must not be part of the +// tree before. +func (t *binTree) add(v uint32) { + vn := &t.node[v] + // Set left and right to null indices. + vn.l, vn.r = null, null + // If the binary tree is empty make v the root. + if t.root == null { + t.root = v + vn.p = null + return + } + x := vn.x + p := t.root + // Search for the right leave link and add the new node. + for { + pn := &t.node[p] + if x <= pn.x { + if pn.l == null { + pn.l = v + vn.p = p + return + } + p = pn.l + } else { + if pn.r == null { + pn.r = v + vn.p = p + return + } + p = pn.r + } + } +} + +// parent returns the parent node index of v and the pointer to v value +// in the parent. +func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) { + if t.root == v { + return null, &t.root + } + p = t.node[v].p + if t.node[p].l == v { + ptr = &t.node[p].l + } else { + ptr = &t.node[p].r + } + return +} + +// Remove node v. +func (t *binTree) remove(v uint32) { + vn := &t.node[v] + p, ptr := t.parent(v) + l, r := vn.l, vn.r + if l == null { + // Move the right child up. + *ptr = r + if r != null { + t.node[r].p = p + } + return + } + if r == null { + // Move the left child up. + *ptr = l + t.node[l].p = p + return + } + + // Search the in-order predecessor u. + un := &t.node[l] + ur := un.r + if ur == null { + // In order predecessor is l. Move it up. + un.r = r + t.node[r].p = l + un.p = p + *ptr = l + return + } + var u uint32 + for { + // Look for the max value in the tree where l is root. + u = ur + ur = t.node[u].r + if ur == null { + break + } + } + // replace u with ul + un = &t.node[u] + ul := un.l + up := un.p + t.node[up].r = ul + if ul != null { + t.node[ul].p = up + } + + // replace v by u + un.l, un.r = l, r + t.node[l].p = u + t.node[r].p = u + *ptr = u + un.p = p +} + +// search looks for the node that have the value x or for the nodes that +// brace it. The node highest in the tree with the value x will be +// returned. All other nodes with the same value live in left subtree of +// the returned node. 
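// [Editor's sketch, not part of the vendored source] search returns the pair
// of nodes bracing x. An analogous contract on a sorted slice, shown here
// only for comparison using the standard library:
//
//	import "sort"
//
//	// brace returns a, the index of the last element < x, and b, the
//	// index of the first element >= x (a == -1 or b == len(s) if absent).
//	func brace(s []int, x int) (a, b int) {
//		b = sort.SearchInts(s, x)
//		return b - 1, b
//	}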
+func (t *binTree) search(v uint32, x uint32) (a, b uint32) { + a, b = null, null + if v == null { + return + } + for { + vn := &t.node[v] + if x <= vn.x { + if x == vn.x { + return v, v + } + b = v + if vn.l == null { + return + } + v = vn.l + } else { + a = v + if vn.r == null { + return + } + v = vn.r + } + } +} + +// max returns the node with maximum value in the subtree with v as +// root. +func (t *binTree) max(v uint32) uint32 { + if v == null { + return null + } + for { + r := t.node[v].r + if r == null { + return v + } + v = r + } +} + +// min returns the node with the minimum value in the subtree with v as +// root. +func (t *binTree) min(v uint32) uint32 { + if v == null { + return null + } + for { + l := t.node[v].l + if l == null { + return v + } + v = l + } +} + +// pred returns the in-order predecessor of node v. +func (t *binTree) pred(v uint32) uint32 { + if v == null { + return null + } + u := t.max(t.node[v].l) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].r == v { + return p + } + v = p + } +} + +// succ returns the in-order successor of node v. +func (t *binTree) succ(v uint32) uint32 { + if v == null { + return null + } + u := t.min(t.node[v].r) + if u != null { + return u + } + for { + p := t.node[v].p + if p == null { + return null + } + if t.node[p].l == v { + return p + } + v = p + } +} + +// xval converts the first four bytes of a into an 32-bit unsigned +// integer in big-endian order. +func xval(a []byte) uint32 { + var x uint32 + switch len(a) { + default: + x |= uint32(a[3]) + fallthrough + case 3: + x |= uint32(a[2]) << 8 + fallthrough + case 2: + x |= uint32(a[1]) << 16 + fallthrough + case 1: + x |= uint32(a[0]) << 24 + case 0: + } + return x +} + +// dumpX converts value x into a four-letter string. +func dumpX(x uint32) string { + a := make([]byte, 4) + for i := 0; i < 4; i++ { + c := byte(x >> uint((3-i)*8)) + if unicode.IsGraphic(rune(c)) { + a[i] = c + } else { + a[i] = '.' + } + } + return string(a) +} + +/* +// dumpNode writes a representation of the node v into the io.Writer. +func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) { + if v == null { + return + } + + vn := &t.node[v] + + t.dumpNode(w, vn.r, indent+2) + + for i := 0; i < indent; i++ { + fmt.Fprint(w, " ") + } + if vn.p == null { + fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x)) + } else { + fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p) + } + + t.dumpNode(w, vn.l, indent+2) +} + +// dump prints a representation of the binary tree into the writer. 
+func (t *binTree) dump(w io.Writer) error { + bw := bufio.NewWriter(w) + t.dumpNode(bw, t.root, 0) + return bw.Flush() +} +*/ + +func (t *binTree) distance(v uint32) int { + dist := int(t.front) - int(v) + if dist <= 0 { + dist += len(t.node) + } + return dist +} + +type matchParams struct { + rep [4]uint32 + // length when match will be accepted + nAccept int + // nodes to check + check int + // finish if length get shorter + stopShorter bool +} + +func (t *binTree) match(m match, distIter func() (int, bool), p matchParams, +) (r match, checked int, accepted bool) { + buf := &t.dict.buf + for { + if checked >= p.check { + return m, checked, true + } + dist, ok := distIter() + if !ok { + return m, checked, false + } + checked++ + if m.n > 0 { + i := buf.rear - dist + m.n - 1 + if i < 0 { + i += len(buf.data) + } else if i >= len(buf.data) { + i -= len(buf.data) + } + if buf.data[i] != t.data[m.n-1] { + if p.stopShorter { + return m, checked, false + } + continue + } + } + n := buf.matchLen(dist, t.data) + switch n { + case 0: + if p.stopShorter { + return m, checked, false + } + continue + case 1: + if uint32(dist-minDistance) != p.rep[0] { + continue + } + } + if n < m.n || (n == m.n && int64(dist) >= m.distance) { + continue + } + m = match{int64(dist), n} + if n >= p.nAccept { + return m, checked, true + } + } +} + +func (t *binTree) NextOp(rep [4]uint32) operation { + // retrieve maxMatchLen data + n, _ := t.dict.buf.Peek(t.data[:maxMatchLen]) + if n == 0 { + panic("no data in buffer") + } + t.data = t.data[:n] + + var ( + m match + x, u, v uint32 + iterPred, iterSucc func() (int, bool) + ) + p := matchParams{ + rep: rep, + nAccept: maxMatchLen, + check: 32, + } + i := 4 + iterSmall := func() (dist int, ok bool) { + i-- + if i <= 0 { + return 0, false + } + return i, true + } + m, checked, accepted := t.match(m, iterSmall, p) + if accepted { + goto end + } + p.check -= checked + x = xval(t.data) + u, v = t.search(t.root, x) + if u == v && len(t.data) == 4 { + iter := func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u, v = t.search(t.node[u].l, x) + if u != v { + u = null + } + return dist, true + } + m, _, _ = t.match(m, iter, p) + goto end + } + p.stopShorter = true + iterSucc = func() (dist int, ok bool) { + if v == null { + return 0, false + } + dist = t.distance(v) + v = t.succ(v) + return dist, true + } + m, checked, accepted = t.match(m, iterSucc, p) + if accepted { + goto end + } + p.check -= checked + iterPred = func() (dist int, ok bool) { + if u == null { + return 0, false + } + dist = t.distance(u) + u = t.pred(u) + return dist, true + } + m, _, _ = t.match(m, iterPred, p) +end: + if m.n == 0 { + return lit{t.data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go new file mode 100644 index 0000000000..2010917092 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -0,0 +1,47 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +/* Naming conventions follows the CodeReviewComments in the Go Wiki. */ + +// ntz32Const is used by the functions NTZ and NLZ. +const ntz32Const = 0x04d7651f + +// ntz32Table is a helper table for de Bruijn algorithm by Danny Dubé. +// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26. 
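// [Editor's sketch, not part of the vendored source] The table drives the
// classic de Bruijn trick: isolate the lowest set bit, multiply by the
// constant, and the top five bits of the product index the answer. The
// commented-out ntz32 below can be cross-checked against the standard
// library:
//
//	import "math/bits"
//
//	func ntz32(x uint32) int {
//		if x == 0 {
//			return 32
//		}
//		x = (x & -x) * ntz32Const
//		return int(ntz32Table[x>>27])
//	}
//
//	// for every x: ntz32(x) == bits.TrailingZeros32(x)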
+var ntz32Table = [32]int8{ + 0, 1, 2, 24, 3, 19, 6, 25, + 22, 4, 20, 10, 16, 7, 12, 26, + 31, 23, 18, 5, 21, 9, 15, 11, + 30, 17, 8, 14, 29, 13, 28, 27, +} + +/* +// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer. +func ntz32(x uint32) int { + if x == 0 { + return 32 + } + x = (x & -x) * ntz32Const + return int(ntz32Table[x>>27]) +} +*/ + +// nlz32 computes the number of leading zeros for an unsigned 32-bit integer. +func nlz32(x uint32) int { + // Smear left most bit to the right + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + // Use ntz mechanism to calculate nlz. + x++ + if x == 0 { + return 0 + } + x *= ntz32Const + return 32 - int(ntz32Table[x>>27]) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go new file mode 100644 index 0000000000..9dfdf28b22 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -0,0 +1,39 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// breader provides the ReadByte function for a Reader. It doesn't read +// more data from the reader than absolutely necessary. +type breader struct { + io.Reader + // helper slice to save allocations + p []byte +} + +// ByteReader converts an io.Reader into an io.ByteReader. +func ByteReader(r io.Reader) io.ByteReader { + br, ok := r.(io.ByteReader) + if !ok { + return &breader{r, make([]byte, 1)} + } + return br +} + +// ReadByte read byte function. +func (r *breader) ReadByte() (c byte, err error) { + n, err := r.Reader.Read(r.p) + if n < 1 { + if err == nil { + err = errors.New("breader.ReadByte: no data") + } + return 0, err + } + return r.p[0], nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go new file mode 100644 index 0000000000..af41d5b2dc --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -0,0 +1,171 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" +) + +// buffer provides a circular buffer of bytes. If the front index equals +// the rear index the buffer is empty. As a consequence front cannot be +// equal rear for a full buffer. So a full buffer has a length that is +// one byte less the the length of the data slice. +type buffer struct { + data []byte + front int + rear int +} + +// newBuffer creates a buffer with the given size. +func newBuffer(size int) *buffer { + return &buffer{data: make([]byte, size+1)} +} + +// Cap returns the capacity of the buffer. +func (b *buffer) Cap() int { + return len(b.data) - 1 +} + +// Resets the buffer. The front and rear index are set to zero. +func (b *buffer) Reset() { + b.front = 0 + b.rear = 0 +} + +// Buffered returns the number of bytes buffered. +func (b *buffer) Buffered() int { + delta := b.front - b.rear + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// Available returns the number of bytes available for writing. +func (b *buffer) Available() int { + delta := b.rear - 1 - b.front + if delta < 0 { + delta += len(b.data) + } + return delta +} + +// addIndex adds a non-negative integer to the index i and returns the +// resulting index. 
The function takes care of wrapping the index as +// well as potential overflow situations. +func (b *buffer) addIndex(i int, n int) int { + // subtraction of len(b.data) prevents overflow + i += n - len(b.data) + if i < 0 { + i += len(b.data) + } + return i +} + +// Read reads bytes from the buffer into p and returns the number of +// bytes read. The function never returns an error but might return less +// data than requested. +func (b *buffer) Read(p []byte) (n int, err error) { + n, err = b.Peek(p) + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// Peek reads bytes from the buffer into p without changing the buffer. +// Peek will never return an error but might return less data than +// requested. +func (b *buffer) Peek(p []byte) (n int, err error) { + m := b.Buffered() + n = len(p) + if m < n { + n = m + p = p[:n] + } + k := copy(p, b.data[b.rear:]) + if k < n { + copy(p[k:], b.data) + } + return n, nil +} + +// Discard skips the n next bytes to read from the buffer, returning the +// bytes discarded. +// +// If Discards skips fewer than n bytes, it returns an error. +func (b *buffer) Discard(n int) (discarded int, err error) { + if n < 0 { + return 0, errors.New("buffer.Discard: negative argument") + } + m := b.Buffered() + if m < n { + n = m + err = errors.New( + "buffer.Discard: discarded less bytes then requested") + } + b.rear = b.addIndex(b.rear, n) + return n, err +} + +// ErrNoSpace indicates that there is insufficient space for the Write +// operation. +var ErrNoSpace = errors.New("insufficient space") + +// Write puts data into the buffer. If less bytes are written than +// requested ErrNoSpace is returned. +func (b *buffer) Write(p []byte) (n int, err error) { + m := b.Available() + n = len(p) + if m < n { + n = m + p = p[:m] + err = ErrNoSpace + } + k := copy(b.data[b.front:], p) + if k < n { + copy(b.data, p[k:]) + } + b.front = b.addIndex(b.front, n) + return n, err +} + +// WriteByte writes a single byte into the buffer. The error ErrNoSpace +// is returned if no single byte is available in the buffer for writing. +func (b *buffer) WriteByte(c byte) error { + if b.Available() < 1 { + return ErrNoSpace + } + b.data[b.front] = c + b.front = b.addIndex(b.front, 1) + return nil +} + +// prefixLen returns the length of the common prefix of a and b. +func prefixLen(a, b []byte) int { + if len(a) > len(b) { + a, b = b, a + } + for i, c := range a { + if b[i] != c { + return i + } + } + return len(a) +} + +// matchLen returns the length of the common prefix for the given +// distance from the rear and the byte slice p. +func (b *buffer) matchLen(distance int, p []byte) int { + var n int + i := b.rear - distance + if i < 0 { + if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i { + return n + } + p = p[n:] + i = 0 + } + n += prefixLen(p, b.data[i:]) + return n +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go new file mode 100644 index 0000000000..f27e31a4a8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -0,0 +1,37 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// ErrLimit indicates that the limit of the LimitedByteWriter has been +// reached. +var ErrLimit = errors.New("limit reached") + +// LimitedByteWriter provides a byte writer that can be written until a +// limit is reached. 
The field N provides the number of remaining +// bytes. +type LimitedByteWriter struct { + BW io.ByteWriter + N int64 +} + +// WriteByte writes a single byte to the limited byte writer. It returns +// ErrLimit if the limit has been reached. If the byte is successfully +// written the field N of the LimitedByteWriter will be decremented by +// one. +func (l *LimitedByteWriter) WriteByte(c byte) error { + if l.N <= 0 { + return ErrLimit + } + if err := l.BW.WriteByte(c); err != nil { + return err + } + l.N-- + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go new file mode 100644 index 0000000000..3765484e61 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -0,0 +1,277 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// decoder decodes a raw LZMA stream without any header. +type decoder struct { + // dictionary; the rear pointer of the buffer will be used for + // reading the data. + Dict *decoderDict + // decoder state + State *state + // range decoder + rd *rangeDecoder + // start stores the head value of the dictionary for the LZMA + // stream + start int64 + // size of uncompressed data + size int64 + // end-of-stream encountered + eos bool + // EOS marker found + eosMarker bool +} + +// newDecoder creates a new decoder instance. The parameter size provides +// the expected byte size of the decompressed data. If the size is +// unknown use a negative value. In that case the decoder will look for +// a terminating end-of-stream marker. +func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) { + rd, err := newRangeDecoder(br) + if err != nil { + return nil, err + } + d = &decoder{ + State: state, + Dict: dict, + rd: rd, + size: size, + start: dict.pos(), + } + return d, nil +} + +// Reopen restarts the decoder with a new byte reader and a new size. Reopen +// resets the Decompressed counter to zero. +func (d *decoder) Reopen(br io.ByteReader, size int64) error { + var err error + if d.rd, err = newRangeDecoder(br); err != nil { + return err + } + d.start = d.Dict.pos() + d.size = size + d.eos = false + return nil +} + +// decodeLiteral decodes a single literal from the LZMA stream. +func (d *decoder) decodeLiteral() (op operation, err error) { + litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head) + match := d.Dict.byteAt(int(d.State.rep[0]) + 1) + s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState) + if err != nil { + return nil, err + } + return lit{s}, nil +} + +// errEOS indicates that an EOS marker has been found. +var errEOS = errors.New("EOS marker found") + +// readOp decodes the next operation from the compressed stream. It +// returns the operation. If an explicit end of stream marker is +// identified the eos error is returned. 
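// [Editor's sketch, not part of the vendored source] The decoder keeps the
// four most recently used match distances in State.rep. A simple match
// pushes the new distance to the front exactly like this helper:
//
//	func pushRep(rep [4]uint32, dist uint32) [4]uint32 {
//		rep[3], rep[2], rep[1], rep[0] = rep[2], rep[1], rep[0], dist
//		return rep
//	}
//
// The rep-match branches below instead rotate the reused distance back to
// rep[0] without discarding any entry.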
+func (d *decoder) readOp() (op operation, err error) { + // Value of the end of stream (EOS) marker + const eosDist = 1<<32 - 1 + + state, state2, posState := d.State.states(d.Dict.head) + + b, err := d.State.isMatch[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // literal + op, err := d.decodeLiteral() + if err != nil { + return nil, err + } + d.State.updateStateLiteral() + return op, nil + } + b, err = d.State.isRep[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + // simple match + d.State.rep[3], d.State.rep[2], d.State.rep[1] = + d.State.rep[2], d.State.rep[1], d.State.rep[0] + + d.State.updateStateMatch() + // The length decoder returns the length offset. + n, err := d.State.lenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + // The dist decoder returns the distance offset. The actual + // distance is 1 higher. + d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n) + if err != nil { + return nil, err + } + if d.State.rep[0] == eosDist { + d.eosMarker = true + return nil, errEOS + } + op = match{n: int(n) + minMatchLen, + distance: int64(d.State.rep[0]) + minDistance} + return op, nil + } + b, err = d.State.isRepG0[state].Decode(d.rd) + if err != nil { + return nil, err + } + dist := d.State.rep[0] + if b == 0 { + // rep match 0 + b, err = d.State.isRepG0Long[state2].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + d.State.updateStateShortRep() + op = match{n: 1, distance: int64(dist) + minDistance} + return op, nil + } + } else { + b, err = d.State.isRepG1[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[1] + } else { + b, err = d.State.isRepG2[state].Decode(d.rd) + if err != nil { + return nil, err + } + if b == 0 { + dist = d.State.rep[2] + } else { + dist = d.State.rep[3] + d.State.rep[3] = d.State.rep[2] + } + d.State.rep[2] = d.State.rep[1] + } + d.State.rep[1] = d.State.rep[0] + d.State.rep[0] = dist + } + n, err := d.State.repLenCodec.Decode(d.rd, posState) + if err != nil { + return nil, err + } + d.State.updateStateRep() + op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance} + return op, nil +} + +// apply takes the operation and transforms the decoder dictionary accordingly. +func (d *decoder) apply(op operation) error { + var err error + switch x := op.(type) { + case match: + err = d.Dict.writeMatch(x.distance, x.n) + case lit: + err = d.Dict.WriteByte(x.b) + default: + panic("op is neither a match nor a literal") + } + return err +} + +// decompress fills the dictionary unless no space for new data is +// available. If the end of the LZMA stream has been reached io.EOF will +// be returned. 
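// [Editor's sketch, not part of the vendored source] decompress and Read are
// internal; consumers drive them through the package's exported Reader. A
// minimal usage sketch, assuming an .lzma input file:
//
//	import (
//		"io"
//		"os"
//
//		"github.com/ulikunitz/xz/lzma"
//	)
//
//	func decompressFile(src string, dst io.Writer) error {
//		f, err := os.Open(src)
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		r, err := lzma.NewReader(f)
//		if err != nil {
//			return err
//		}
//		_, err = io.Copy(dst, r)
//		return err
//	}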
+func (d *decoder) decompress() error { + if d.eos { + return io.EOF + } + for d.Dict.Available() >= maxMatchLen { + op, err := d.readOp() + switch err { + case nil: + // break + case errEOS: + d.eos = true + if !d.rd.possiblyAtEnd() { + return errDataAfterEOS + } + if d.size >= 0 && d.size != d.Decompressed() { + return errSize + } + return io.EOF + case io.EOF: + d.eos = true + return io.ErrUnexpectedEOF + default: + return err + } + if err = d.apply(op); err != nil { + return err + } + if d.size >= 0 && d.Decompressed() >= d.size { + d.eos = true + if d.Decompressed() > d.size { + return errSize + } + if !d.rd.possiblyAtEnd() { + switch _, err = d.readOp(); err { + case nil: + return errSize + case io.EOF: + return io.ErrUnexpectedEOF + case errEOS: + break + default: + return err + } + } + return io.EOF + } + } + return nil +} + +// Errors that may be returned while decoding data. +var ( + errDataAfterEOS = errors.New("lzma: data after end of stream marker") + errSize = errors.New("lzma: wrong uncompressed data size") +) + +// Read reads data from the buffer. If no more data is available io.EOF is +// returned. +func (d *decoder) Read(p []byte) (n int, err error) { + var k int + for { + // Read of decoder dict never returns an error. + k, err = d.Dict.Read(p[n:]) + if err != nil { + panic(fmt.Errorf("dictionary read error %s", err)) + } + if k == 0 && d.eos { + return n, io.EOF + } + n += k + if n >= len(p) { + return n, nil + } + if err = d.decompress(); err != nil && err != io.EOF { + return n, err + } + } +} + +// Decompressed returns the number of bytes decompressed by the decoder. +func (d *decoder) Decompressed() int64 { + return d.Dict.pos() - d.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go new file mode 100644 index 0000000000..d5b814f0a4 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -0,0 +1,128 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// decoderDict provides the dictionary for the decoder. The whole +// dictionary is used as reader buffer. +type decoderDict struct { + buf buffer + head int64 +} + +// newDecoderDict creates a new decoder dictionary. The whole dictionary +// will be used as reader buffer. +func newDecoderDict(dictCap int) (d *decoderDict, err error) { + // lower limit supports easy test cases + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New("lzma: dictCap out of range") + } + d = &decoderDict{buf: *newBuffer(dictCap)} + return d, nil +} + +// Reset clears the dictionary. The read buffer is not changed, so the +// buffered data can still be read. +func (d *decoderDict) Reset() { + d.head = 0 +} + +// WriteByte writes a single byte into the dictionary. It is used to +// write literals into the dictionary. +func (d *decoderDict) WriteByte(c byte) error { + if err := d.buf.WriteByte(c); err != nil { + return err + } + d.head++ + return nil +} + +// pos returns the position of the dictionary head. +func (d *decoderDict) pos() int64 { return d.head } + +// dictLen returns the actual length of the dictionary. +func (d *decoderDict) dictLen() int { + capacity := d.buf.Cap() + if d.head >= int64(capacity) { + return capacity + } + return int(d.head) +} + +// byteAt returns a byte stored in the dictionary. 
If the distance is +// non-positive or exceeds the current length of the dictionary the zero +// byte is returned. +func (d *decoderDict) byteAt(dist int) byte { + if !(0 < dist && dist <= d.dictLen()) { + return 0 + } + i := d.buf.front - dist + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// writeMatch writes the match at the top of the dictionary. The given +// distance must point in the current dictionary and the length must not +// exceed the maximum length 273 supported in LZMA. +// +// The error value ErrNoSpace indicates that no space is available in +// the dictionary for writing. You need to read from the dictionary +// first. +func (d *decoderDict) writeMatch(dist int64, length int) error { + if !(0 < dist && dist <= int64(d.dictLen())) { + return errors.New("writeMatch: distance out of range") + } + if !(0 < length && length <= maxMatchLen) { + return errors.New("writeMatch: length out of range") + } + if length > d.buf.Available() { + return ErrNoSpace + } + d.head += int64(length) + + i := d.buf.front - int(dist) + if i < 0 { + i += len(d.buf.data) + } + for length > 0 { + var p []byte + if i >= d.buf.front { + p = d.buf.data[i:] + i = 0 + } else { + p = d.buf.data[i:d.buf.front] + i = d.buf.front + } + if len(p) > length { + p = p[:length] + } + if _, err := d.buf.Write(p); err != nil { + panic(fmt.Errorf("d.buf.Write returned error %s", err)) + } + length -= len(p) + } + return nil +} + +// Write writes the given bytes into the dictionary and advances the +// head. +func (d *decoderDict) Write(p []byte) (n int, err error) { + n, err = d.buf.Write(p) + d.head += int64(n) + return n, err +} + +// Available returns the number of available bytes for writing into the +// decoder dictionary. +func (d *decoderDict) Available() int { return d.buf.Available() } + +// Read reads data from the buffer contained in the decoder dictionary. +func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) } diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go new file mode 100644 index 0000000000..76b713106f --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -0,0 +1,38 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// directCodec allows the encoding and decoding of values with a fixed number +// of bits. The number of bits must be in the range [1,32]. +type directCodec byte + +// Bits returns the number of bits supported by this codec. +func (dc directCodec) Bits() int { + return int(dc) +} + +// Encode uses the range encoder to encode a value with the fixed number of +// bits. The most-significant bit is encoded first. +func (dc directCodec) Encode(e *rangeEncoder, v uint32) error { + for i := int(dc) - 1; i >= 0; i-- { + if err := e.DirectEncodeBit(v >> uint(i)); err != nil { + return err + } + } + return nil +} + +// Decode uses the range decoder to decode a value with the given number of +// given bits. The most-significant bit is decoded first. 
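// [Editor's sketch, not part of the vendored source] Both directions walk the
// fixed-width value from the most-significant bit down; Decode below
// reassembles the bits with the same shift-or order this helper lists them
// in:
//
//	// bitsMSBFirst lists the nbits low-order bits of v, MSB first.
//	func bitsMSBFirst(v uint32, nbits int) []uint32 {
//		out := make([]uint32, 0, nbits)
//		for i := nbits - 1; i >= 0; i-- {
//			out = append(out, (v>>uint(i))&1)
//		}
//		return out
//	}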
+func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) { + for i := int(dc) - 1; i >= 0; i-- { + x, err := d.DirectDecodeBit() + if err != nil { + return 0, err + } + v = (v << 1) | x + } + return v, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go new file mode 100644 index 0000000000..b447d8ec42 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -0,0 +1,140 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// Constants used by the distance codec. +const ( + // minimum supported distance + minDistance = 1 + // maximum supported distance, value is used for the eos marker. + maxDistance = 1 << 32 + // number of the supported len states + lenStates = 4 + // start for the position models + startPosModel = 4 + // first index with align bits support + endPosModel = 14 + // bits for the position slots + posSlotBits = 6 + // number of align bits + alignBits = 4 +) + +// distCodec provides encoding and decoding of distance values. +type distCodec struct { + posSlotCodecs [lenStates]treeCodec + posModel [endPosModel - startPosModel]treeReverseCodec + alignCodec treeReverseCodec +} + +// deepcopy initializes dc as deep copy of the source. +func (dc *distCodec) deepcopy(src *distCodec) { + if dc == src { + return + } + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i]) + } + for i := range dc.posModel { + dc.posModel[i].deepcopy(&src.posModel[i]) + } + dc.alignCodec.deepcopy(&src.alignCodec) +} + +// newDistCodec creates a new distance codec. +func (dc *distCodec) init() { + for i := range dc.posSlotCodecs { + dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits) + } + for i := range dc.posModel { + posSlot := startPosModel + i + bits := (posSlot >> 1) - 1 + dc.posModel[i] = makeTreeReverseCodec(bits) + } + dc.alignCodec = makeTreeReverseCodec(alignBits) +} + +// lenState converts the value l to a supported lenState value. +func lenState(l uint32) uint32 { + if l >= lenStates { + l = lenStates - 1 + } + return l +} + +// Encode encodes the distance using the parameter l. Dist can have values from +// the full range of uint32 values. To get the distance offset the actual match +// distance has to be decreased by 1. A distance offset of 0xffffffff (eos) +// indicates the end of the stream. +func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) { + // Compute the posSlot using nlz32 + var posSlot uint32 + var bits uint32 + if dist < startPosModel { + posSlot = dist + } else { + bits = uint32(30 - nlz32(dist)) + posSlot = startPosModel - 2 + (bits << 1) + posSlot += (dist >> uint(bits)) & 1 + } + + if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil { + return + } + + switch { + case posSlot < startPosModel: + return nil + case posSlot < endPosModel: + tc := &dc.posModel[posSlot-startPosModel] + return tc.Encode(dist, e) + } + dic := directCodec(bits - alignBits) + if err = dic.Encode(e, dist>>alignBits); err != nil { + return + } + return dc.alignCodec.Encode(dist, e) +} + +// Decode decodes the distance offset using the parameter l. The dist value +// 0xffffffff (eos) indicates the end of the stream. Add one to the distance +// offset to get the actual match distance. 
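// [Editor's sketch, not part of the vendored source] The slot computed in
// Encode can be reproduced with the standard library: distances below
// startPosModel map to themselves, larger ones to 2 + 2*bits + the next
// most significant bit:
//
//	import "math/bits"
//
//	func posSlot(dist uint32) uint32 {
//		if dist < 4 { // startPosModel
//			return dist
//		}
//		b := uint32(30 - bits.LeadingZeros32(dist))
//		// e.g. 4 -> 4, 6 -> 5, 0xffffffff (eos) -> 63
//		return 2 + (b << 1) + ((dist >> b) & 1)
//	}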
+func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) { + posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d) + if err != nil { + return + } + + // posSlot equals distance + if posSlot < startPosModel { + return posSlot, nil + } + + // posSlot uses the individual models + bits := (posSlot >> 1) - 1 + dist = (2 | (posSlot & 1)) << bits + var u uint32 + if posSlot < endPosModel { + tc := &dc.posModel[posSlot-startPosModel] + if u, err = tc.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil + } + + // posSlots use direct encoding and a single model for the four align + // bits. + dic := directCodec(bits - alignBits) + if u, err = dic.Decode(d); err != nil { + return 0, err + } + dist += u << alignBits + if u, err = dc.alignCodec.Decode(d); err != nil { + return 0, err + } + dist += u + return dist, nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go new file mode 100644 index 0000000000..e409383181 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -0,0 +1,268 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "io" +) + +// opLenMargin provides the upper limit of the number of bytes required +// to encode a single operation. +const opLenMargin = 16 + +// compressFlags control the compression process. +type compressFlags uint32 + +// Values for compressFlags. +const ( + // all data should be compressed, even if compression is not + // optimal. + all compressFlags = 1 << iota +) + +// encoderFlags provide the flags for an encoder. +type encoderFlags uint32 + +// Flags for the encoder. +const ( + // eosMarker requests an EOS marker to be written. + eosMarker encoderFlags = 1 << iota +) + +// Encoder compresses data buffered in the encoder dictionary and writes +// it into a byte writer. +type encoder struct { + dict *encoderDict + state *state + re *rangeEncoder + start int64 + // generate eos marker + marker bool + limit bool + margin int +} + +// newEncoder creates a new encoder. If the byte writer must be +// limited use LimitedByteWriter provided by this package. The flags +// argument supports the eosMarker flag, controlling whether a +// terminating end-of-stream marker must be written. +func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict, + flags encoderFlags) (e *encoder, err error) { + + re, err := newRangeEncoder(bw) + if err != nil { + return nil, err + } + e = &encoder{ + dict: dict, + state: state, + re: re, + marker: flags&eosMarker != 0, + start: dict.Pos(), + margin: opLenMargin, + } + if e.marker { + e.margin += 5 + } + return e, nil +} + +// Write writes the bytes from p into the dictionary. If not enough +// space is available the data in the dictionary buffer will be +// compressed to make additional space available. If the limit of the +// underlying writer has been reached ErrLimit will be returned. +func (e *encoder) Write(p []byte) (n int, err error) { + for { + k, err := e.dict.Write(p[n:]) + n += k + if err == ErrNoSpace { + if err = e.compress(0); err != nil { + return n, err + } + continue + } + return n, err + } +} + +// Reopen reopens the encoder with a new byte writer. 
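// [Editor's sketch, not part of the vendored source] Like the decoder, this
// encoder is internal and is driven through the package's exported Writer.
// A minimal compression sketch:
//
//	import (
//		"bytes"
//
//		"github.com/ulikunitz/xz/lzma"
//	)
//
//	func compress(p []byte) ([]byte, error) {
//		var buf bytes.Buffer
//		w, err := lzma.NewWriter(&buf)
//		if err != nil {
//			return nil, err
//		}
//		if _, err := w.Write(p); err != nil {
//			return nil, err
//		}
//		if err := w.Close(); err != nil {
//			return nil, err
//		}
//		return buf.Bytes(), nil
//	}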
+func (e *encoder) Reopen(bw io.ByteWriter) error { + var err error + if e.re, err = newRangeEncoder(bw); err != nil { + return err + } + e.start = e.dict.Pos() + e.limit = false + return nil +} + +// writeLiteral writes a literal into the LZMA stream +func (e *encoder) writeLiteral(l lit) error { + var err error + state, state2, _ := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil { + return err + } + litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos()) + match := e.dict.ByteAt(int(e.state.rep[0]) + 1) + err = e.state.litCodec.Encode(e.re, l.b, state, match, litState) + if err != nil { + return err + } + e.state.updateStateLiteral() + return nil +} + +// iverson implements the Iverson operator as proposed by Donald Knuth in his +// book Concrete Mathematics. +func iverson(ok bool) uint32 { + if ok { + return 1 + } + return 0 +} + +// writeMatch writes a repetition operation into the operation stream +func (e *encoder) writeMatch(m match) error { + var err error + if !(minDistance <= m.distance && m.distance <= maxDistance) { + panic(fmt.Errorf("match distance %d out of range", m.distance)) + } + dist := uint32(m.distance - minDistance) + if !(minMatchLen <= m.n && m.n <= maxMatchLen) && + !(dist == e.state.rep[0] && m.n == 1) { + panic(fmt.Errorf( + "match length %d out of range; dist %d rep[0] %d", + m.n, dist, e.state.rep[0])) + } + state, state2, posState := e.state.states(e.dict.Pos()) + if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil { + return err + } + g := 0 + for ; g < 4; g++ { + if e.state.rep[g] == dist { + break + } + } + b := iverson(g < 4) + if err = e.state.isRep[state].Encode(e.re, b); err != nil { + return err + } + n := uint32(m.n - minMatchLen) + if b == 0 { + // simple match + e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] = + e.state.rep[2], e.state.rep[1], e.state.rep[0], dist + e.state.updateStateMatch() + if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil { + return err + } + return e.state.distCodec.Encode(e.re, dist, n) + } + b = iverson(g != 0) + if err = e.state.isRepG0[state].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + // g == 0 + b = iverson(m.n != 1) + if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil { + return err + } + if b == 0 { + e.state.updateStateShortRep() + return nil + } + } else { + // g in {1,2,3} + b = iverson(g != 1) + if err = e.state.isRepG1[state].Encode(e.re, b); err != nil { + return err + } + if b == 1 { + // g in {2,3} + b = iverson(g != 2) + err = e.state.isRepG2[state].Encode(e.re, b) + if err != nil { + return err + } + if b == 1 { + e.state.rep[3] = e.state.rep[2] + } + e.state.rep[2] = e.state.rep[1] + } + e.state.rep[1] = e.state.rep[0] + e.state.rep[0] = dist + } + e.state.updateStateRep() + return e.state.repLenCodec.Encode(e.re, n, posState) +} + +// writeOp writes a single operation to the range encoder. The function +// checks whether there is enough space available to close the LZMA +// stream. +func (e *encoder) writeOp(op operation) error { + if e.re.Available() < int64(e.margin) { + return ErrLimit + } + switch x := op.(type) { + case lit: + return e.writeLiteral(x) + case match: + return e.writeMatch(x) + default: + panic("unexpected operation") + } +} + +// compress compressed data from the dictionary buffer. If the flag all +// is set, all data in the dictionary buffer will be compressed. The +// function returns ErrLimit if the underlying writer has reached its +// limit. 
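// [Editor's sketch, not part of the vendored source] compress below is a
// greedy parse: ask the matcher for the next operation, encode it, discard
// the bytes it covers. Stripped of the dictionary plumbing, the shape is:
//
//	// nextOpLen returns 1 for a literal and the match length otherwise.
//	func greedyParse(data []byte, nextOpLen func([]byte) int) (ops int) {
//		for len(data) > 0 {
//			data = data[nextOpLen(data):]
//			ops++
//		}
//		return ops
//	}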
+func (e *encoder) compress(flags compressFlags) error { + n := 0 + if flags&all == 0 { + n = maxMatchLen - 1 + } + d := e.dict + m := d.m + for d.Buffered() > n { + op := m.NextOp(e.state.rep) + if err := e.writeOp(op); err != nil { + return err + } + d.Discard(op.Len()) + } + return nil +} + +// eosMatch is a pseudo operation that indicates the end of the stream. +var eosMatch = match{distance: maxDistance, n: minMatchLen} + +// Close terminates the LZMA stream. If requested the end-of-stream +// marker will be written. If the byte writer limit has been or will be +// reached during compression of the remaining data in the buffer the +// LZMA stream will be closed and data will remain in the buffer. +func (e *encoder) Close() error { + err := e.compress(all) + if err != nil && err != ErrLimit { + return err + } + if e.marker { + if err := e.writeMatch(eosMatch); err != nil { + return err + } + } + err = e.re.Close() + return err +} + +// Compressed returns the number bytes of the input data that been +// compressed. +func (e *encoder) Compressed() int64 { + return e.dict.Pos() - e.start +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go new file mode 100644 index 0000000000..4b3916eaba --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -0,0 +1,149 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +// matcher is an interface that supports the identification of the next +// operation. +type matcher interface { + io.Writer + SetDict(d *encoderDict) + NextOp(rep [4]uint32) operation +} + +// encoderDict provides the dictionary of the encoder. It includes an +// additional buffer atop of the actual dictionary. +type encoderDict struct { + buf buffer + m matcher + head int64 + capacity int + // preallocated array + data [maxMatchLen]byte +} + +// newEncoderDict creates the encoder dictionary. The argument bufSize +// defines the size of the additional buffer. +func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) { + if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) { + return nil, errors.New( + "lzma: dictionary capacity out of range") + } + if bufSize < 1 { + return nil, errors.New( + "lzma: buffer size must be larger than zero") + } + d = &encoderDict{ + buf: *newBuffer(dictCap + bufSize), + capacity: dictCap, + m: m, + } + m.SetDict(d) + return d, nil +} + +// Discard discards n bytes. Note that n must not be larger than +// MaxMatchLen. +func (d *encoderDict) Discard(n int) { + p := d.data[:n] + k, _ := d.buf.Read(p) + if k < n { + panic(fmt.Errorf("lzma: can't discard %d bytes", n)) + } + d.head += int64(n) + d.m.Write(p) +} + +// Len returns the data available in the encoder dictionary. +func (d *encoderDict) Len() int { + n := d.buf.Available() + if int64(n) > d.head { + return int(d.head) + } + return n +} + +// DictLen returns the actual length of data in the dictionary. +func (d *encoderDict) DictLen() int { + if d.head < int64(d.capacity) { + return int(d.head) + } + return d.capacity +} + +// Available returns the number of bytes that can be written by a +// following Write call. +func (d *encoderDict) Available() int { + return d.buf.Available() - d.DictLen() +} + +// Write writes data into the dictionary buffer. Note that the position +// of the dictionary head will not be moved. 
If there is not enough +// space in the buffer ErrNoSpace will be returned. +func (d *encoderDict) Write(p []byte) (n int, err error) { + m := d.Available() + if len(p) > m { + p = p[:m] + err = ErrNoSpace + } + var e error + if n, e = d.buf.Write(p); e != nil { + err = e + } + return n, err +} + +// Pos returns the position of the head. +func (d *encoderDict) Pos() int64 { return d.head } + +// ByteAt returns the byte at the given distance. +func (d *encoderDict) ByteAt(distance int) byte { + if !(0 < distance && distance <= d.Len()) { + return 0 + } + i := d.buf.rear - distance + if i < 0 { + i += len(d.buf.data) + } + return d.buf.data[i] +} + +// CopyN copies the last n bytes from the dictionary into the provided +// writer. This is used for copying uncompressed data into an +// uncompressed segment. +func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) { + if n <= 0 { + return 0, nil + } + m := d.Len() + if n > m { + n = m + err = ErrNoSpace + } + i := d.buf.rear - n + var e error + if i < 0 { + i += len(d.buf.data) + if written, e = w.Write(d.buf.data[i:]); e != nil { + return written, e + } + i = 0 + } + var k int + k, e = w.Write(d.buf.data[i:d.buf.rear]) + written += k + if e != nil { + err = e + } + return written, err +} + +// Buffered returns the number of bytes in the buffer. +func (d *encoderDict) Buffered() int { return d.buf.Buffered() } diff --git a/vendor/github.com/ulikunitz/xz/lzma/fox.lzma b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma new file mode 100644 index 0000000000..5edad63326 Binary files /dev/null and b/vendor/github.com/ulikunitz/xz/lzma/fox.lzma differ diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go new file mode 100644 index 0000000000..f66e9cdd9e --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -0,0 +1,309 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + + "github.com/ulikunitz/xz/internal/hash" +) + +/* For compression we need to find byte sequences that match the byte + * sequence at the dictionary head. A hash table is a simple method to + * provide this capability. + */ + +// maxMatches limits the number of matches requested from the Matches +// function. This controls the speed of the overall encoding. +const maxMatches = 16 + +// shortDists defines the number of short distances supported by the +// implementation. +const shortDists = 8 + +// The minimum is somehow arbitrary but the maximum is limited by the +// memory requirements of the hash table. +const ( + minTableExponent = 9 + maxTableExponent = 20 +) + +// newRoller contains the function used to create an instance of the +// hash.Roller. +var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) } + +// hashTable stores the hash table including the rolling hash method. +// +// We implement chained hashing into a circular buffer. Each entry in +// the circular buffer stores the delta distance to the next position with a +// word that has the same hash value. 
+type hashTable struct { + dict *encoderDict + // actual hash table + t []int64 + // circular list data with the offset to the next word + data []uint32 + front int + // mask for computing the index for the hash table + mask uint64 + // hash offset; initial value is -int64(wordLen) + hoff int64 + // length of the hashed word + wordLen int + // hash roller for computing the hash values for the Write + // method + wr hash.Roller + // hash roller for computing arbitrary hashes + hr hash.Roller + // preallocated slices + p [maxMatches]int64 + distances [maxMatches + shortDists]int +} + +// hashTableExponent derives the hash table exponent from the dictionary +// capacity. +func hashTableExponent(n uint32) int { + e := 30 - nlz32(n) + switch { + case e < minTableExponent: + e = minTableExponent + case e > maxTableExponent: + e = maxTableExponent + } + return e +} + +// newHashTable creates a new hash table for words of length wordLen +func newHashTable(capacity int, wordLen int) (t *hashTable, err error) { + if !(0 < capacity) { + return nil, errors.New( + "newHashTable: capacity must not be negative") + } + exp := hashTableExponent(uint32(capacity)) + if !(1 <= wordLen && wordLen <= 4) { + return nil, errors.New("newHashTable: " + + "argument wordLen out of range") + } + n := 1 << uint(exp) + if n <= 0 { + panic("newHashTable: exponent is too large") + } + t = &hashTable{ + t: make([]int64, n), + data: make([]uint32, capacity), + mask: (uint64(1) << uint(exp)) - 1, + hoff: -int64(wordLen), + wordLen: wordLen, + wr: newRoller(wordLen), + hr: newRoller(wordLen), + } + return t, nil +} + +func (t *hashTable) SetDict(d *encoderDict) { t.dict = d } + +// buffered returns the number of bytes that are currently hashed. +func (t *hashTable) buffered() int { + n := t.hoff + 1 + switch { + case n <= 0: + return 0 + case n >= int64(len(t.data)): + return len(t.data) + } + return int(n) +} + +// addIndex adds n to an index ensuring that is stays inside the +// circular buffer for the hash chain. +func (t *hashTable) addIndex(i, n int) int { + i += n - len(t.data) + if i < 0 { + i += len(t.data) + } + return i +} + +// putDelta puts the delta instance at the current front of the circular +// chain buffer. +func (t *hashTable) putDelta(delta uint32) { + t.data[t.front] = delta + t.front = t.addIndex(t.front, 1) +} + +// putEntry puts a new entry into the hash table. If there is already a +// value stored it is moved into the circular chain buffer. +func (t *hashTable) putEntry(h uint64, pos int64) { + if pos < 0 { + return + } + i := h & t.mask + old := t.t[i] - 1 + t.t[i] = pos + 1 + var delta int64 + if old >= 0 { + delta = pos - old + if delta > 1<<32-1 || delta > int64(t.buffered()) { + delta = 0 + } + } + t.putDelta(uint32(delta)) +} + +// WriteByte converts a single byte into a hash and puts them into the hash +// table. +func (t *hashTable) WriteByte(b byte) error { + h := t.wr.RollByte(b) + t.hoff++ + t.putEntry(h, t.hoff) + return nil +} + +// Write converts the bytes provided into hash tables and stores the +// abbreviated offsets into the hash table. The method will never return an +// error. +func (t *hashTable) Write(p []byte) (n int, err error) { + for _, b := range p { + // WriteByte doesn't generate an error. + t.WriteByte(b) + } + return len(p), nil +} + +// getMatches the matches for a specific hash. The functions returns the +// number of positions found. +// +// TODO: Make a getDistances because that we are actually interested in. 
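// [Editor's sketch, not part of the vendored source] getMatches below walks a
// hash chain backwards: each ring-buffer slot stores the delta to the
// previous position with the same hash, and a zero delta ends the chain
// (the real code additionally stops at the oldest buffered position). The
// traversal in isolation:
//
//	func walkChain(head int64, delta func(pos int64) uint32) []int64 {
//		positions := []int64{head}
//		for {
//			d := delta(head)
//			if d == 0 {
//				break
//			}
//			head -= int64(d)
//			positions = append(positions, head)
//		}
//		return positions
//	}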
+func (t *hashTable) getMatches(h uint64, positions []int64) (n int) { + if t.hoff < 0 || len(positions) == 0 { + return 0 + } + buffered := t.buffered() + tailPos := t.hoff + 1 - int64(buffered) + rear := t.front - buffered + if rear >= 0 { + rear -= len(t.data) + } + // get the slot for the hash + pos := t.t[h&t.mask] - 1 + delta := pos - tailPos + for { + if delta < 0 { + return n + } + positions[n] = tailPos + delta + n++ + if n >= len(positions) { + return n + } + i := rear + int(delta) + if i < 0 { + i += len(t.data) + } + u := t.data[i] + if u == 0 { + return n + } + delta -= int64(u) + } +} + +// hash computes the rolling hash for the word stored in p. For correct +// results its length must be equal to t.wordLen. +func (t *hashTable) hash(p []byte) uint64 { + var h uint64 + for _, b := range p { + h = t.hr.RollByte(b) + } + return h +} + +// Matches fills the positions slice with potential matches. The +// functions returns the number of positions filled into positions. The +// byte slice p must have word length of the hash table. +func (t *hashTable) Matches(p []byte, positions []int64) int { + if len(p) != t.wordLen { + panic(fmt.Errorf( + "byte slice must have length %d", t.wordLen)) + } + h := t.hash(p) + return t.getMatches(h, positions) +} + +// NextOp identifies the next operation using the hash table. +// +// TODO: Use all repetitions to find matches. +func (t *hashTable) NextOp(rep [4]uint32) operation { + // get positions + data := t.dict.data[:maxMatchLen] + n, _ := t.dict.buf.Peek(data) + data = data[:n] + var p []int64 + if n < t.wordLen { + p = t.p[:0] + } else { + p = t.p[:maxMatches] + n = t.Matches(data[:t.wordLen], p) + p = p[:n] + } + + // convert positions in potential distances + head := t.dict.head + dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8) + for _, pos := range p { + dis := int(head - pos) + if dis > shortDists { + dists = append(dists, dis) + } + } + + // check distances + var m match + dictLen := t.dict.DictLen() + for _, dist := range dists { + if dist > dictLen { + continue + } + + // Here comes a trick. We are only interested in matches + // that are longer than the matches we have been found + // before. So before we test the whole byte sequence at + // the given distance, we test the first byte that would + // make the match longer. If it doesn't match the byte + // to match, we don't to care any longer. + i := t.dict.buf.rear - dist + m.n + if i < 0 { + i += len(t.dict.buf.data) + } + if t.dict.buf.data[i] != data[m.n] { + // We can't get a longer match. Jump to the next + // distance. + continue + } + + n := t.dict.buf.matchLen(dist, data) + switch n { + case 0: + continue + case 1: + if uint32(dist-minDistance) != rep[0] { + continue + } + } + if n > m.n { + m = match{int64(dist), n} + if n == len(data) { + // No better match will be found. + break + } + } + } + + if m.n == 0 { + return lit{data[0]} + } + return m +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go new file mode 100644 index 0000000000..1ae7d80cab --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -0,0 +1,167 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package lzma
+
+import (
+	"errors"
+	"fmt"
+)
+
+// uint32LE reads a uint32 integer from a byte slice
+func uint32LE(b []byte) uint32 {
+	x := uint32(b[3]) << 24
+	x |= uint32(b[2]) << 16
+	x |= uint32(b[1]) << 8
+	x |= uint32(b[0])
+	return x
+}
+
+// uint64LE converts the uint64 value stored as little endian to a uint64
+// value.
+func uint64LE(b []byte) uint64 {
+	x := uint64(b[7]) << 56
+	x |= uint64(b[6]) << 48
+	x |= uint64(b[5]) << 40
+	x |= uint64(b[4]) << 32
+	x |= uint64(b[3]) << 24
+	x |= uint64(b[2]) << 16
+	x |= uint64(b[1]) << 8
+	x |= uint64(b[0])
+	return x
+}
+
+// putUint32LE puts a uint32 integer into a byte slice that must have at least
+// a length of 4 bytes.
+func putUint32LE(b []byte, x uint32) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+}
+
+// putUint64LE puts the uint64 value into the byte slice as little endian
+// value. The byte slice b must have room for at least 8 bytes.
+func putUint64LE(b []byte, x uint64) {
+	b[0] = byte(x)
+	b[1] = byte(x >> 8)
+	b[2] = byte(x >> 16)
+	b[3] = byte(x >> 24)
+	b[4] = byte(x >> 32)
+	b[5] = byte(x >> 40)
+	b[6] = byte(x >> 48)
+	b[7] = byte(x >> 56)
+}
+
+// noHeaderSize defines the value of the length field in the LZMA header.
+const noHeaderSize uint64 = 1<<64 - 1
+
+// HeaderLen provides the length of the LZMA file header.
+const HeaderLen = 13
+
+// header represents the header of an LZMA file.
+type header struct {
+	properties Properties
+	dictCap    int
+	// uncompressed size; negative value if no size is given
+	size int64
+}
+
+// marshalBinary marshals the header.
+func (h *header) marshalBinary() (data []byte, err error) {
+	if err = h.properties.verify(); err != nil {
+		return nil, err
+	}
+	if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
+		return nil, fmt.Errorf("lzma: DictCap %d out of range",
+			h.dictCap)
+	}
+
+	data = make([]byte, 13)
+
+	// property byte
+	data[0] = h.properties.Code()
+
+	// dictionary capacity
+	putUint32LE(data[1:5], uint32(h.dictCap))
+
+	// uncompressed size
+	var s uint64
+	if h.size > 0 {
+		s = uint64(h.size)
+	} else {
+		s = noHeaderSize
+	}
+	putUint64LE(data[5:], s)
+
+	return data, nil
+}
+
+// unmarshalBinary unmarshals the header.
+func (h *header) unmarshalBinary(data []byte) error {
+	if len(data) != HeaderLen {
+		return errors.New("lzma.unmarshalBinary: data has wrong length")
+	}
+
+	// properties
+	var err error
+	if h.properties, err = PropertiesForCode(data[0]); err != nil {
+		return err
+	}
+
+	// dictionary capacity
+	h.dictCap = int(uint32LE(data[1:]))
+	if h.dictCap < 0 {
+		return errors.New(
+			"LZMA header: dictionary capacity exceeds maximum " +
+				"integer")
+	}
+
+	// uncompressed size
+	s := uint64LE(data[5:])
+	if s == noHeaderSize {
+		h.size = -1
+	} else {
+		h.size = int64(s)
+		if h.size < 0 {
+			return errors.New(
+				"LZMA header: uncompressed size " +
+					"out of int64 range")
+		}
+	}
+
+	return nil
+}
+
+// validDictCap checks whether the dictionary capacity is correct. This
+// is used to weed out wrong file headers.
+func validDictCap(dictcap int) bool {
+	if int64(dictcap) == MaxDictCap {
+		return true
+	}
+	for n := uint(10); n < 32; n++ {
+		if dictcap == 1<<n {
+			return true
+		}
+		if dictcap == 1<<n+1<<(n-1) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidHeader checks for a valid LZMA file header. It allows only
+// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
+// there is an explicit size it must not exceed 256 GiB. The length of
+// the data argument must be HeaderLen.
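Editorial note: the 13-byte classic header marshaled above can be reproduced standalone. A minimal sketch; encodeClassicHeader is a hypothetical helper, and the properties packing mirrors the Properties code byte used by this package:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeClassicHeader builds the layout described above: one properties
// byte, the dictionary capacity as little-endian uint32, and the
// uncompressed size as little-endian uint64 (all ones when unknown).
func encodeClassicHeader(lc, lp, pb int, dictCap uint32, size int64) []byte {
	b := make([]byte, 13)
	b[0] = byte((pb*5+lp)*9 + lc) // same packing as the properties code
	binary.LittleEndian.PutUint32(b[1:5], dictCap)
	u := ^uint64(0) // noHeaderSize: no explicit size given
	if size > 0 {
		u = uint64(size)
	}
	binary.LittleEndian.PutUint64(b[5:13], u)
	return b
}

func main() {
	fmt.Printf("% x\n", encodeClassicHeader(3, 0, 2, 8<<20, -1))
}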
+func ValidHeader(data []byte) bool { + var h header + if err := h.unmarshalBinary(data); err != nil { + return false + } + if !validDictCap(h.dictCap) { + return false + } + return h.size < 0 || h.size <= 1<<38 +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go new file mode 100644 index 0000000000..081fc840b6 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -0,0 +1,398 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" + "io" +) + +const ( + // maximum size of compressed data in a chunk + maxCompressed = 1 << 16 + // maximum size of uncompressed data in a chunk + maxUncompressed = 1 << 21 +) + +// chunkType represents the type of an LZMA2 chunk. Note that this +// value is an internal representation and no actual encoding of a LZMA2 +// chunk header. +type chunkType byte + +// Possible values for the chunk type. +const ( + // end of stream + cEOS chunkType = iota + // uncompressed; reset dictionary + cUD + // uncompressed; no reset of dictionary + cU + // LZMA compressed; no reset + cL + // LZMA compressed; reset state + cLR + // LZMA compressed; reset state; new property value + cLRN + // LZMA compressed; reset state; new property value; reset dictionary + cLRND +) + +// chunkTypeStrings provide a string representation for the chunk types. +var chunkTypeStrings = [...]string{ + cEOS: "EOS", + cU: "U", + cUD: "UD", + cL: "L", + cLR: "LR", + cLRN: "LRN", + cLRND: "LRND", +} + +// String returns a string representation of the chunk type. +func (c chunkType) String() string { + if !(cEOS <= c && c <= cLRND) { + return "unknown" + } + return chunkTypeStrings[c] +} + +// Actual encodings for the chunk types in the value. Note that the high +// uncompressed size bits are stored in the header byte additionally. +const ( + hEOS = 0 + hUD = 1 + hU = 2 + hL = 1 << 7 + hLR = 1<<7 | 1<<5 + hLRN = 1<<7 | 1<<6 + hLRND = 1<<7 | 1<<6 | 1<<5 +) + +// errHeaderByte indicates an unsupported value for the chunk header +// byte. These bytes starts the variable-length chunk header. +var errHeaderByte = errors.New("lzma: unsupported chunk header byte") + +// headerChunkType converts the header byte into a chunk type. It +// ignores the uncompressed size bits in the chunk header byte. +func headerChunkType(h byte) (c chunkType, err error) { + if h&hL == 0 { + // no compression + switch h { + case hEOS: + c = cEOS + case hUD: + c = cUD + case hU: + c = cU + default: + return 0, errHeaderByte + } + return + } + switch h & hLRND { + case hL: + c = cL + case hLR: + c = cLR + case hLRN: + c = cLRN + case hLRND: + c = cLRND + default: + return 0, errHeaderByte + } + return +} + +// uncompressedHeaderLen provides the length of an uncompressed header +const uncompressedHeaderLen = 3 + +// headerLen returns the length of the LZMA2 header for a given chunk +// type. +func headerLen(c chunkType) int { + switch c { + case cEOS: + return 1 + case cU, cUD: + return uncompressedHeaderLen + case cL, cLR: + return 5 + case cLRN, cLRND: + return 6 + } + panic(fmt.Errorf("unsupported chunk type %d", c)) +} + +// chunkHeader represents the contents of a chunk header. +type chunkHeader struct { + ctype chunkType + uncompressed uint32 + compressed uint16 + props Properties +} + +// String returns a string representation of the chunk header. 
+func (h *chunkHeader) String() string { + return fmt.Sprintf("%s %d %d %s", h.ctype, h.uncompressed, + h.compressed, &h.props) +} + +// UnmarshalBinary reads the content of the chunk header from the data +// slice. The slice must have the correct length. +func (h *chunkHeader) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return errors.New("no data") + } + c, err := headerChunkType(data[0]) + if err != nil { + return err + } + + n := headerLen(c) + if len(data) < n { + return errors.New("incomplete data") + } + if len(data) > n { + return errors.New("invalid data length") + } + + *h = chunkHeader{ctype: c} + if c == cEOS { + return nil + } + + h.uncompressed = uint32(uint16BE(data[1:3])) + if c <= cU { + return nil + } + h.uncompressed |= uint32(data[0]&^hLRND) << 16 + + h.compressed = uint16BE(data[3:5]) + if c <= cLR { + return nil + } + + h.props, err = PropertiesForCode(data[5]) + return err +} + +// MarshalBinary encodes the chunk header value. The function checks +// whether the content of the chunk header is correct. +func (h *chunkHeader) MarshalBinary() (data []byte, err error) { + if h.ctype > cLRND { + return nil, errors.New("invalid chunk type") + } + if err = h.props.verify(); err != nil { + return nil, err + } + + data = make([]byte, headerLen(h.ctype)) + + switch h.ctype { + case cEOS: + return data, nil + case cUD: + data[0] = hUD + case cU: + data[0] = hU + case cL: + data[0] = hL + case cLR: + data[0] = hLR + case cLRN: + data[0] = hLRN + case cLRND: + data[0] = hLRND + } + + putUint16BE(data[1:3], uint16(h.uncompressed)) + if h.ctype <= cU { + return data, nil + } + data[0] |= byte(h.uncompressed>>16) &^ hLRND + + putUint16BE(data[3:5], h.compressed) + if h.ctype <= cLR { + return data, nil + } + + data[5] = h.props.Code() + return data, nil +} + +// readChunkHeader reads the chunk header from the IO reader. +func readChunkHeader(r io.Reader) (h *chunkHeader, err error) { + p := make([]byte, 1, 6) + if _, err = io.ReadFull(r, p); err != nil { + return + } + c, err := headerChunkType(p[0]) + if err != nil { + return + } + p = p[:headerLen(c)] + if _, err = io.ReadFull(r, p[1:]); err != nil { + return + } + h = new(chunkHeader) + if err = h.UnmarshalBinary(p); err != nil { + return nil, err + } + return h, nil +} + +// uint16BE converts a big-endian uint16 representation to an uint16 +// value. +func uint16BE(p []byte) uint16 { + return uint16(p[0])<<8 | uint16(p[1]) +} + +// putUint16BE puts the big-endian uint16 presentation into the given +// slice. 
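Editorial note: the size fields handled by UnmarshalBinary and MarshalBinary above can be read with plain bit operations. A minimal sketch; chunkSizes is a hypothetical helper for a compressed-chunk header:

package main

import "fmt"

// chunkSizes reads the sizes of a compressed LZMA2 chunk header as laid
// out above: the low 5 bits of the control byte are the high bits of
// (uncompressed size - 1), completed big-endian by the next two bytes;
// bytes 3-4 hold (compressed size - 1), also big-endian.
func chunkSizes(h []byte) (uncompressed, compressed int) {
	u := int(h[0]&0x1f)<<16 | int(h[1])<<8 | int(h[2])
	c := int(h[3])<<8 | int(h[4])
	return u + 1, c + 1
}

func main() {
	// 0xe0 is the LRND control byte (1<<7 | 1<<6 | 1<<5) with zero size bits.
	fmt.Println(chunkSizes([]byte{0xe0, 0x00, 0x0f, 0x00, 0x7f})) // 16 128
}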
+func putUint16BE(p []byte, x uint16) { + p[0] = byte(x >> 8) + p[1] = byte(x) +} + +// chunkState is used to manage the state of the chunks +type chunkState byte + +// start and stop define the initial and terminating state of the chunk +// state +const ( + start chunkState = 'S' + stop chunkState = 'T' +) + +// errors for the chunk state handling +var ( + errChunkType = errors.New("lzma: unexpected chunk type") + errState = errors.New("lzma: wrong chunk state") +) + +// next transitions state based on chunk type input +func (c *chunkState) next(ctype chunkType) error { + switch *c { + // start state + case 'S': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cLRND: + *c = 'L' + default: + return errChunkType + } + // normal LZMA mode + case 'L': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + *c = 'U' + case cL, cLR, cLRN, cLRND: + break + default: + return errChunkType + } + // reset required + case 'R': + switch ctype { + case cEOS: + *c = 'T' + case cUD, cU: + break + case cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // uncompressed + case 'U': + switch ctype { + case cEOS: + *c = 'T' + case cUD: + *c = 'R' + case cU: + break + case cL, cLR, cLRN, cLRND: + *c = 'L' + default: + return errChunkType + } + // terminal state + case 'T': + return errChunkType + default: + return errState + } + return nil +} + +// defaultChunkType returns the default chunk type for each chunk state. +func (c chunkState) defaultChunkType() chunkType { + switch c { + case 'S': + return cLRND + case 'L', 'U': + return cL + case 'R': + return cLRN + default: + // no error + return cEOS + } +} + +// maxDictCap defines the maximum dictionary capacity supported by the +// LZMA2 dictionary capacity encoding. +const maxDictCap = 1<<32 - 1 + +// maxDictCapCode defines the maximum dictionary capacity code. +const maxDictCapCode = 40 + +// The function decodes the dictionary capacity byte, but doesn't change +// for the correct range of the given byte. +func decodeDictCap(c byte) int64 { + return (2 | int64(c)&1) << (11 + (c>>1)&0x1f) +} + +// DecodeDictCap decodes the encoded dictionary capacity. The function +// returns an error if the code is out of range. +func DecodeDictCap(c byte) (n int64, err error) { + if c >= maxDictCapCode { + if c == maxDictCapCode { + return maxDictCap, nil + } + return 0, errors.New("lzma: invalid dictionary size code") + } + return decodeDictCap(c), nil +} + +// EncodeDictCap encodes a dictionary capacity. The function returns the +// code for the capacity that is greater or equal n. If n exceeds the +// maximum support dictionary capacity, the maximum value is returned. +func EncodeDictCap(n int64) byte { + a, b := byte(0), byte(40) + for a < b { + c := a + (b-a)>>1 + m := decodeDictCap(c) + if n <= m { + if n == m { + return c + } + b = c + } else { + a = c + 1 + } + } + return a +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go new file mode 100644 index 0000000000..1ea5320a0c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -0,0 +1,115 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import "errors" + +// maxPosBits defines the number of bits of the position value that are used to +// to compute the posState value. 
The value is used to select the tree codec +// for length encoding and decoding. +const maxPosBits = 4 + +// minMatchLen and maxMatchLen give the minimum and maximum values for +// encoding and decoding length values. minMatchLen is also used as base +// for the encoded length values. +const ( + minMatchLen = 2 + maxMatchLen = minMatchLen + 16 + 256 - 1 +) + +// lengthCodec support the encoding of the length value. +type lengthCodec struct { + choice [2]prob + low [1 << maxPosBits]treeCodec + mid [1 << maxPosBits]treeCodec + high treeCodec +} + +// deepcopy initializes the lc value as deep copy of the source value. +func (lc *lengthCodec) deepcopy(src *lengthCodec) { + if lc == src { + return + } + lc.choice = src.choice + for i := range lc.low { + lc.low[i].deepcopy(&src.low[i]) + } + for i := range lc.mid { + lc.mid[i].deepcopy(&src.mid[i]) + } + lc.high.deepcopy(&src.high) +} + +// init initializes a new length codec. +func (lc *lengthCodec) init() { + for i := range lc.choice { + lc.choice[i] = probInit + } + for i := range lc.low { + lc.low[i] = makeTreeCodec(3) + } + for i := range lc.mid { + lc.mid[i] = makeTreeCodec(3) + } + lc.high = makeTreeCodec(8) +} + +// Encode encodes the length offset. The length offset l can be compute by +// subtracting minMatchLen (2) from the actual length. +// +// l = length - minMatchLen +func (lc *lengthCodec) Encode(e *rangeEncoder, l uint32, posState uint32, +) (err error) { + if l > maxMatchLen-minMatchLen { + return errors.New("lengthCodec.Encode: l out of range") + } + if l < 8 { + if err = lc.choice[0].Encode(e, 0); err != nil { + return + } + return lc.low[posState].Encode(e, l) + } + if err = lc.choice[0].Encode(e, 1); err != nil { + return + } + if l < 16 { + if err = lc.choice[1].Encode(e, 0); err != nil { + return + } + return lc.mid[posState].Encode(e, l-8) + } + if err = lc.choice[1].Encode(e, 1); err != nil { + return + } + if err = lc.high.Encode(e, l-16); err != nil { + return + } + return nil +} + +// Decode reads the length offset. Add minMatchLen to compute the actual length +// to the length offset l. +func (lc *lengthCodec) Decode(d *rangeDecoder, posState uint32, +) (l uint32, err error) { + var b uint32 + if b, err = lc.choice[0].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.low[posState].Decode(d) + return + } + if b, err = lc.choice[1].Decode(d); err != nil { + return + } + if b == 0 { + l, err = lc.mid[posState].Decode(d) + l += 8 + return + } + l, err = lc.high.Decode(d) + l += 16 + return +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go new file mode 100644 index 0000000000..e4ef5fc59c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -0,0 +1,125 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// literalCodec supports the encoding of literal. It provides 768 probability +// values per literal state. The upper 512 probabilities are used with the +// context of a match bit. +type literalCodec struct { + probs []prob +} + +// deepcopy initializes literal codec c as a deep copy of the source. +func (c *literalCodec) deepcopy(src *literalCodec) { + if c == src { + return + } + c.probs = make([]prob, len(src.probs)) + copy(c.probs, src.probs) +} + +// init initializes the literal codec. 
+func (c *literalCodec) init(lc, lp int) {
+	switch {
+	case !(minLC <= lc && lc <= maxLC):
+		panic("lc out of range")
+	case !(minLP <= lp && lp <= maxLP):
+		panic("lp out of range")
+	}
+	c.probs = make([]prob, 0x300<<uint(lc+lp))
+	for i := range c.probs {
+		c.probs[i] = probInit
+	}
+}
+
+// Encode encodes the byte b using a range encoder as well as the current
+// LZMA encoder state, a match byte and the literal state.
+func (c *literalCodec) Encode(e *rangeEncoder, b byte,
+	state uint32, match byte, litState uint32,
+) (err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	r := uint32(b)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			bit := (r >> 7) & 1
+			r <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			if err = probs[i].Encode(e, bit); err != nil {
+				return
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit := (r >> 7) & 1
+		r <<= 1
+		if err = probs[symbol].Encode(e, bit); err != nil {
+			return
+		}
+		symbol = (symbol << 1) | bit
+	}
+	return nil
+}
+
+// Decode decodes a literal byte using the range decoder as well as the LZMA
+// state, a match byte, and the literal state.
+func (c *literalCodec) Decode(d *rangeDecoder,
+	state uint32, match byte, litState uint32,
+) (s byte, err error) {
+	k := litState * 0x300
+	probs := c.probs[k : k+0x300]
+	symbol := uint32(1)
+	if state >= 7 {
+		m := uint32(match)
+		for {
+			matchBit := (m >> 7) & 1
+			m <<= 1
+			i := ((1 + matchBit) << 8) | symbol
+			bit, err := d.DecodeBit(&probs[i])
+			if err != nil {
+				return 0, err
+			}
+			symbol = (symbol << 1) | bit
+			if matchBit != bit {
+				break
+			}
+			if symbol >= 0x100 {
+				break
+			}
+		}
+	}
+	for symbol < 0x100 {
+		bit, err := d.DecodeBit(&probs[symbol])
+		if err != nil {
+			return 0, err
+		}
+		symbol = (symbol << 1) | bit
+	}
+	s = byte(symbol - 0x100)
+	return s, nil
+}
+
+// minLC and maxLC define the range for LC values.
+const (
+	minLC = 0
+	maxLC = 8
+)
+
+// minLP and maxLP define the range for LP values.
+const (
+	minLP = 0
+	maxLP = 4
+)
diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
new file mode 100644
index 0000000000..02dfb8bf51
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go
@@ -0,0 +1,52 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import "errors"
+
+// MatchAlgorithm identifies an algorithm to find matches in the
+// dictionary.
+type MatchAlgorithm byte
+
+// Supported matcher algorithms.
+const (
+	HashTable4 MatchAlgorithm = iota
+	BinaryTree
+)
+
+// maStrings are used by the String method.
+var maStrings = map[MatchAlgorithm]string{
+	HashTable4: "HashTable4",
+	BinaryTree: "BinaryTree",
+}
+
+// String returns a string representation of the Matcher.
+func (a MatchAlgorithm) String() string {
+	if s, ok := maStrings[a]; ok {
+		return s
+	}
+	return "unknown"
+}
+
+var errUnsupportedMatchAlgorithm = errors.New(
+	"lzma: unsupported match algorithm value")
+
+// verify checks whether the matcher value is supported.
+func (a MatchAlgorithm) verify() error {
+	if _, ok := maStrings[a]; !ok {
+		return errUnsupportedMatchAlgorithm
+	}
+	return nil
+}
+
+func (a MatchAlgorithm) new(dictCap int) (m matcher, err error) {
+	switch a {
+	case HashTable4:
+		return newHashTable(dictCap, 4)
+	case BinaryTree:
+		return newBinTree(dictCap)
+	}
+	return nil, errUnsupportedMatchAlgorithm
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go
new file mode 100644
index 0000000000..7b7eddc3d4
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go
@@ -0,0 +1,55 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "fmt" + "unicode" +) + +// operation represents an operation on the dictionary during encoding or +// decoding. +type operation interface { + Len() int +} + +// rep represents a repetition at the given distance and the given length +type match struct { + // supports all possible distance values, including the eos marker + distance int64 + // length + n int +} + +// Len returns the number of bytes matched. +func (m match) Len() int { + return m.n +} + +// String returns a string representation for the repetition. +func (m match) String() string { + return fmt.Sprintf("M{%d,%d}", m.distance, m.n) +} + +// lit represents a single byte literal. +type lit struct { + b byte +} + +// Len returns 1 for the single byte literal. +func (l lit) Len() int { + return 1 +} + +// String returns a string representation for the literal. +func (l lit) String() string { + var c byte + if unicode.IsPrint(rune(l.b)) { + c = l.b + } else { + c = '.' + } + return fmt.Sprintf("L{%c/%02x}", c, l.b) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go new file mode 100644 index 0000000000..2feccba11d --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -0,0 +1,53 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +// movebits defines the number of bits used for the updates of probability +// values. +const movebits = 5 + +// probbits defines the number of bits of a probability value. +const probbits = 11 + +// probInit defines 0.5 as initial value for prob values. +const probInit prob = 1 << (probbits - 1) + +// Type prob represents probabilities. The type can also be used to encode and +// decode single bits. +type prob uint16 + +// Dec decreases the probability. The decrease is proportional to the +// probability value. +func (p *prob) dec() { + *p -= *p >> movebits +} + +// Inc increases the probability. The Increase is proportional to the +// difference of 1 and the probability value. +func (p *prob) inc() { + *p += ((1 << probbits) - *p) >> movebits +} + +// Computes the new bound for a given range using the probability value. +func (p prob) bound(r uint32) uint32 { + return (r >> probbits) * uint32(p) +} + +// Bits returns 1. One is the number of bits that can be encoded or decoded +// with a single prob value. +func (p prob) Bits() int { + return 1 +} + +// Encode encodes the least-significant bit of v. Note that the p value will be +// changed. +func (p *prob) Encode(e *rangeEncoder, v uint32) error { + return e.EncodeBit(v, p) +} + +// Decode decodes a single bit. Note that the p value will change. +func (p *prob) Decode(d *rangeDecoder) (v uint32, err error) { + return d.DecodeBit(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go new file mode 100644 index 0000000000..15b754ccb9 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -0,0 +1,69 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "fmt" +) + +// maximum and minimum values for the LZMA properties. 
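Editorial note: the adaptive probability model defined in prob.go above converges geometrically. A standalone sketch of the inc, dec, and bound arithmetic, with the constants copied from movebits (5) and probbits (11):

package main

import "fmt"

func main() {
	p := uint16(1 << 10) // probInit: probability 0.5
	for i := 0; i < 3; i++ {
		p += ((1 << 11) - p) >> 5 // inc(), after a 0 bit
	}
	fmt.Println("after three 0 bits:", p) // drifts toward 1<<11
	for i := 0; i < 3; i++ {
		p -= p >> 5 // dec(), after a 1 bit
	}
	fmt.Println("after three 1 bits:", p) // drifts back toward 0
	// The coder splits its current range at bound = (range>>11) * p.
	r := uint32(0xffffffff)
	fmt.Printf("bound: %#x\n", (r>>11)*uint32(p))
}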
+const ( + minPB = 0 + maxPB = 4 +) + +// maxPropertyCode is the possible maximum of a properties code byte. +const maxPropertyCode = (maxPB+1)*(maxLP+1)*(maxLC+1) - 1 + +// Properties contains the parameters LC, LP and PB. The parameter LC +// defines the number of literal context bits; parameter LP the number +// of literal position bits and PB the number of position bits. +type Properties struct { + LC int + LP int + PB int +} + +// String returns the properties in a string representation. +func (p *Properties) String() string { + return fmt.Sprintf("LC %d LP %d PB %d", p.LC, p.LP, p.PB) +} + +// PropertiesForCode converts a properties code byte into a Properties value. +func PropertiesForCode(code byte) (p Properties, err error) { + if code > maxPropertyCode { + return p, errors.New("lzma: invalid properties code") + } + p.LC = int(code % 9) + code /= 9 + p.LP = int(code % 5) + code /= 5 + p.PB = int(code % 5) + return p, err +} + +// verify checks the properties for correctness. +func (p *Properties) verify() error { + if p == nil { + return errors.New("lzma: properties are nil") + } + if !(minLC <= p.LC && p.LC <= maxLC) { + return errors.New("lzma: lc out of range") + } + if !(minLP <= p.LP && p.LP <= maxLP) { + return errors.New("lzma: lp out of range") + } + if !(minPB <= p.PB && p.PB <= maxPB) { + return errors.New("lzma: pb out of range") + } + return nil +} + +// Code converts the properties to a byte. The function assumes that +// the properties components are all in range. +func (p Properties) Code() byte { + return byte((p.PB*5+p.LP)*9 + p.LC) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go new file mode 100644 index 0000000000..4b0fee3ff8 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -0,0 +1,222 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" +) + +// rangeEncoder implements range encoding of single bits. The low value can +// overflow therefore we need uint64. The cache value is used to handle +// overflows. +type rangeEncoder struct { + lbw *LimitedByteWriter + nrange uint32 + low uint64 + cacheLen int64 + cache byte +} + +// maxInt64 provides the maximal value of the int64 type +const maxInt64 = 1<<63 - 1 + +// newRangeEncoder creates a new range encoder. +func newRangeEncoder(bw io.ByteWriter) (re *rangeEncoder, err error) { + lbw, ok := bw.(*LimitedByteWriter) + if !ok { + lbw = &LimitedByteWriter{BW: bw, N: maxInt64} + } + return &rangeEncoder{ + lbw: lbw, + nrange: 0xffffffff, + cacheLen: 1}, nil +} + +// Available returns the number of bytes that still can be written. The +// method takes the bytes that will be currently written by Close into +// account. +func (e *rangeEncoder) Available() int64 { + return e.lbw.N - (e.cacheLen + 4) +} + +// writeByte writes a single byte to the underlying writer. An error is +// returned if the limit is reached. The written byte will be counted if +// the underlying writer doesn't return an error. +func (e *rangeEncoder) writeByte(c byte) error { + if e.Available() < 1 { + return ErrLimit + } + return e.lbw.WriteByte(c) +} + +// DirectEncodeBit encodes the least-significant bit of b with probability 1/2. 
+func (e *rangeEncoder) DirectEncodeBit(b uint32) error { + e.nrange >>= 1 + e.low += uint64(e.nrange) & (0 - (uint64(b) & 1)) + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// EncodeBit encodes the least significant bit of b. The p value will be +// updated by the function depending on the bit encoded. +func (e *rangeEncoder) EncodeBit(b uint32, p *prob) error { + bound := p.bound(e.nrange) + if b&1 == 0 { + e.nrange = bound + p.inc() + } else { + e.low += uint64(bound) + e.nrange -= bound + p.dec() + } + + // normalize + const top = 1 << 24 + if e.nrange >= top { + return nil + } + e.nrange <<= 8 + return e.shiftLow() +} + +// Close writes a complete copy of the low value. +func (e *rangeEncoder) Close() error { + for i := 0; i < 5; i++ { + if err := e.shiftLow(); err != nil { + return err + } + } + return nil +} + +// shiftLow shifts the low value for 8 bit. The shifted byte is written into +// the byte writer. The cache value is used to handle overflows. +func (e *rangeEncoder) shiftLow() error { + if uint32(e.low) < 0xff000000 || (e.low>>32) != 0 { + tmp := e.cache + for { + err := e.writeByte(tmp + byte(e.low>>32)) + if err != nil { + return err + } + tmp = 0xff + e.cacheLen-- + if e.cacheLen <= 0 { + if e.cacheLen < 0 { + panic("negative cacheLen") + } + break + } + } + e.cache = byte(uint32(e.low) >> 24) + } + e.cacheLen++ + e.low = uint64(uint32(e.low) << 8) + return nil +} + +// rangeDecoder decodes single bits of the range encoding stream. +type rangeDecoder struct { + br io.ByteReader + nrange uint32 + code uint32 +} + +// newRangeDecoder initializes a range decoder. It reads five bytes from the +// reader and therefore may return an error. +func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) { + d = &rangeDecoder{br: br, nrange: 0xffffffff} + + b, err := d.br.ReadByte() + if err != nil { + return nil, err + } + if b != 0 { + return nil, errors.New("newRangeDecoder: first byte not zero") + } + + for i := 0; i < 4; i++ { + if err = d.updateCode(); err != nil { + return nil, err + } + } + + if d.code >= d.nrange { + return nil, errors.New("newRangeDecoder: d.code >= d.nrange") + } + + return d, nil +} + +// possiblyAtEnd checks whether the decoder may be at the end of the stream. +func (d *rangeDecoder) possiblyAtEnd() bool { + return d.code == 0 +} + +// DirectDecodeBit decodes a bit with probability 1/2. The return value b will +// contain the bit at the least-significant position. All other bits will be +// zero. +func (d *rangeDecoder) DirectDecodeBit() (b uint32, err error) { + d.nrange >>= 1 + d.code -= d.nrange + t := 0 - (d.code >> 31) + d.code += d.nrange & t + b = (t + 1) & 1 + + // d.code will stay less then d.nrange + + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// decodeBit decodes a single bit. The bit will be returned at the +// least-significant position. All other bits will be zero. The probability +// value will be updated. 
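Editorial note: DirectDecodeBit above extracts the bit without a branch. A standalone copy of that arithmetic; directBit is an illustrative wrapper, not part of the package:

package main

import "fmt"

// directBit subtracts the halved range from code; the sign bit of the
// result selects the decoded bit, and the mask t adds the range back in
// when the bit is 0.
func directBit(code, nrange uint32) (bit, newCode, newRange uint32) {
	nrange >>= 1
	code -= nrange
	t := 0 - (code >> 31) // all ones if the subtraction wrapped, else 0
	code += nrange & t
	return (t + 1) & 1, code, nrange
}

func main() {
	fmt.Println(directBit(0x10000000, 0xffffffff)) // decodes a 0 bit
	fmt.Println(directBit(0x90000000, 0xffffffff)) // decodes a 1 bit
}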
+func (d *rangeDecoder) DecodeBit(p *prob) (b uint32, err error) { + bound := p.bound(d.nrange) + if d.code < bound { + d.nrange = bound + p.inc() + b = 0 + } else { + d.code -= bound + d.nrange -= bound + p.dec() + b = 1 + } + // normalize + // assume d.code < d.nrange + const top = 1 << 24 + if d.nrange >= top { + return b, nil + } + d.nrange <<= 8 + // d.code < d.nrange will be maintained + return b, d.updateCode() +} + +// updateCode reads a new byte into the code. +func (d *rangeDecoder) updateCode() error { + b, err := d.br.ReadByte() + if err != nil { + return err + } + d.code = (d.code << 8) | uint32(b) + return nil +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go new file mode 100644 index 0000000000..ae911c3893 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -0,0 +1,100 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lzma supports the decoding and encoding of LZMA streams. +// Reader and Writer support the classic LZMA format. Reader2 and +// Writer2 support the decoding and encoding of LZMA2 streams. +// +// The package is written completely in Go and doesn't rely on any external +// library. +package lzma + +import ( + "errors" + "io" +) + +// ReaderConfig stores the parameters for the reader of the classic LZMA +// format. +type ReaderConfig struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *ReaderConfig) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero values will +// be replaced by default values. +func (c *ReaderConfig) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader provides a reader for LZMA files or streams. +type Reader struct { + lzma io.Reader + h header + d *decoder +} + +// NewReader creates a new reader for an LZMA stream using the classic +// format. NewReader reads and checks the header of the LZMA stream. +func NewReader(lzma io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(lzma) +} + +// NewReader creates a new reader for an LZMA stream in the classic +// format. The function reads and verifies the the header of the LZMA +// stream. +func (c ReaderConfig) NewReader(lzma io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(lzma, data); err != nil { + if err == io.EOF { + return nil, errors.New("lzma: unexpected EOF") + } + return nil, err + } + r = &Reader{lzma: lzma} + if err = r.h.unmarshalBinary(data); err != nil { + return nil, err + } + if r.h.dictCap < MinDictCap { + r.h.dictCap = MinDictCap + } + dictCap := r.h.dictCap + if c.DictCap > dictCap { + dictCap = c.DictCap + } + + state := newState(r.h.properties) + dict, err := newDecoderDict(dictCap) + if err != nil { + return nil, err + } + r.d, err = newDecoder(ByteReader(lzma), state, dict, r.h.size) + if err != nil { + return nil, err + } + return r, nil +} + +// EOSMarker indicates that an EOS marker has been encountered. +func (r *Reader) EOSMarker() bool { + return r.d.eosMarker +} + +// Read returns uncompressed data. 
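Editorial note: typical use of the classic-format Reader defined here, reading the test file fox.lzma added earlier in this diff. A minimal sketch:

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

// Decompress a classic .lzma stream to stdout.
func main() {
	f, err := os.Open("fox.lzma")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	r, err := lzma.NewReader(f)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}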
+func (r *Reader) Read(p []byte) (n int, err error) { + return r.d.Read(p) +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go new file mode 100644 index 0000000000..f36e265050 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -0,0 +1,231 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "errors" + "io" + + "github.com/ulikunitz/xz/internal/xlog" +) + +// Reader2Config stores the parameters for the LZMA2 reader. +// format. +type Reader2Config struct { + DictCap int +} + +// fill converts the zero values of the configuration to the default values. +func (c *Reader2Config) fill() { + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } +} + +// Verify checks the reader configuration for errors. Zero configuration values +// will be replaced by default values. +func (c *Reader2Config) Verify() error { + c.fill() + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + return nil +} + +// Reader2 supports the reading of LZMA2 chunk sequences. Note that the +// first chunk should have a dictionary reset and the first compressed +// chunk a properties reset. The chunk sequence may not be terminated by +// an end-of-stream chunk. +type Reader2 struct { + r io.Reader + err error + + dict *decoderDict + ur *uncompressedReader + decoder *decoder + chunkReader io.Reader + + cstate chunkState +} + +// NewReader2 creates a reader for an LZMA2 chunk sequence. +func NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + return Reader2Config{}.NewReader2(lzma2) +} + +// NewReader2 creates an LZMA2 reader using the given configuration. +func (c Reader2Config) NewReader2(lzma2 io.Reader) (r *Reader2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader2{r: lzma2, cstate: start} + r.dict, err = newDecoderDict(c.DictCap) + if err != nil { + return nil, err + } + if err = r.startChunk(); err != nil { + r.err = err + } + return r, nil +} + +// uncompressed tests whether the chunk type specifies an uncompressed +// chunk. +func uncompressed(ctype chunkType) bool { + return ctype == cU || ctype == cUD +} + +// startChunk parses a new chunk. 
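Editorial note: Reader2Config can widen the dictionary before decoding an LZMA2 chunk sequence. A minimal sketch; the input file name is a placeholder:

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz/lzma"
)

// Decode an LZMA2 chunk sequence with an explicit dictionary capacity.
// As noted above, the sequence must start with a dictionary reset.
func main() {
	f, err := os.Open("data.lzma2") // placeholder input
	if err != nil {
		panic(err)
	}
	defer f.Close()
	cfg := lzma.Reader2Config{DictCap: 32 << 20} // 32 MiB
	r, err := cfg.NewReader2(f)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}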
+func (r *Reader2) startChunk() error { + r.chunkReader = nil + header, err := readChunkHeader(r.r) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + xlog.Debugf("chunk header %v", header) + if err = r.cstate.next(header.ctype); err != nil { + return err + } + if r.cstate == stop { + return io.EOF + } + if header.ctype == cUD || header.ctype == cLRND { + r.dict.Reset() + } + size := int64(header.uncompressed) + 1 + if uncompressed(header.ctype) { + if r.ur != nil { + r.ur.Reopen(r.r, size) + } else { + r.ur = newUncompressedReader(r.r, r.dict, size) + } + r.chunkReader = r.ur + return nil + } + br := ByteReader(io.LimitReader(r.r, int64(header.compressed)+1)) + if r.decoder == nil { + state := newState(header.props) + r.decoder, err = newDecoder(br, state, r.dict, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil + } + switch header.ctype { + case cLR: + r.decoder.State.Reset() + case cLRN, cLRND: + r.decoder.State = newState(header.props) + } + err = r.decoder.Reopen(br, size) + if err != nil { + return err + } + r.chunkReader = r.decoder + return nil +} + +// Read reads data from the LZMA2 chunk sequence. +func (r *Reader2) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + for n < len(p) { + var k int + k, err = r.chunkReader.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + err = r.startChunk() + if err == nil { + continue + } + } + r.err = err + return n, err + } + if k == 0 { + r.err = errors.New("lzma: Reader2 doesn't get data") + return n, r.err + } + } + return n, nil +} + +// EOS returns whether the LZMA2 stream has been terminated by an +// end-of-stream chunk. +func (r *Reader2) EOS() bool { + return r.cstate == stop +} + +// uncompressedReader is used to read uncompressed chunks. +type uncompressedReader struct { + lr io.LimitedReader + Dict *decoderDict + eof bool + err error +} + +// newUncompressedReader initializes a new uncompressedReader. +func newUncompressedReader(r io.Reader, dict *decoderDict, size int64) *uncompressedReader { + ur := &uncompressedReader{ + lr: io.LimitedReader{R: r, N: size}, + Dict: dict, + } + return ur +} + +// Reopen reinitializes an uncompressed reader. +func (ur *uncompressedReader) Reopen(r io.Reader, size int64) { + ur.err = nil + ur.eof = false + ur.lr = io.LimitedReader{R: r, N: size} +} + +// fill reads uncompressed data into the dictionary. +func (ur *uncompressedReader) fill() error { + if !ur.eof { + n, err := io.CopyN(ur.Dict, &ur.lr, int64(ur.Dict.Available())) + if err != io.EOF { + return err + } + ur.eof = true + if n > 0 { + return nil + } + } + if ur.lr.N != 0 { + return io.ErrUnexpectedEOF + } + return io.EOF +} + +// Read reads uncompressed data from the limited reader. +func (ur *uncompressedReader) Read(p []byte) (n int, err error) { + if ur.err != nil { + return 0, ur.err + } + for { + var k int + k, err = ur.Dict.Read(p[n:]) + n += k + if n >= len(p) { + return n, nil + } + if err != nil { + break + } + err = ur.fill() + if err != nil { + break + } + } + ur.err = err + return n, err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go new file mode 100644 index 0000000000..34779c513c --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -0,0 +1,145 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lzma + +// states defines the overall state count +const states = 12 + +// State maintains the full state of the operation encoding or decoding +// process. +type state struct { + rep [4]uint32 + isMatch [states << maxPosBits]prob + isRepG0Long [states << maxPosBits]prob + isRep [states]prob + isRepG0 [states]prob + isRepG1 [states]prob + isRepG2 [states]prob + litCodec literalCodec + lenCodec lengthCodec + repLenCodec lengthCodec + distCodec distCodec + state uint32 + posBitMask uint32 + Properties Properties +} + +// initProbSlice initializes a slice of probabilities. +func initProbSlice(p []prob) { + for i := range p { + p[i] = probInit + } +} + +// Reset sets all state information to the original values. +func (s *state) Reset() { + p := s.Properties + *s = state{ + Properties: p, + // dict: s.dict, + posBitMask: (uint32(1) << uint(p.PB)) - 1, + } + initProbSlice(s.isMatch[:]) + initProbSlice(s.isRep[:]) + initProbSlice(s.isRepG0[:]) + initProbSlice(s.isRepG1[:]) + initProbSlice(s.isRepG2[:]) + initProbSlice(s.isRepG0Long[:]) + s.litCodec.init(p.LC, p.LP) + s.lenCodec.init() + s.repLenCodec.init() + s.distCodec.init() +} + +// newState creates a new state from the give Properties. +func newState(p Properties) *state { + s := &state{Properties: p} + s.Reset() + return s +} + +// deepcopy initializes s as a deep copy of the source. +func (s *state) deepcopy(src *state) { + if s == src { + return + } + s.rep = src.rep + s.isMatch = src.isMatch + s.isRepG0Long = src.isRepG0Long + s.isRep = src.isRep + s.isRepG0 = src.isRepG0 + s.isRepG1 = src.isRepG1 + s.isRepG2 = src.isRepG2 + s.litCodec.deepcopy(&src.litCodec) + s.lenCodec.deepcopy(&src.lenCodec) + s.repLenCodec.deepcopy(&src.repLenCodec) + s.distCodec.deepcopy(&src.distCodec) + s.state = src.state + s.posBitMask = src.posBitMask + s.Properties = src.Properties +} + +// cloneState creates a new clone of the give state. +func cloneState(src *state) *state { + s := new(state) + s.deepcopy(src) + return s +} + +// updateStateLiteral updates the state for a literal. +func (s *state) updateStateLiteral() { + switch { + case s.state < 4: + s.state = 0 + return + case s.state < 10: + s.state -= 3 + return + } + s.state -= 6 +} + +// updateStateMatch updates the state for a match. +func (s *state) updateStateMatch() { + if s.state < 7 { + s.state = 7 + } else { + s.state = 10 + } +} + +// updateStateRep updates the state for a repetition. +func (s *state) updateStateRep() { + if s.state < 7 { + s.state = 8 + } else { + s.state = 11 + } +} + +// updateStateShortRep updates the state for a short repetition. +func (s *state) updateStateShortRep() { + if s.state < 7 { + s.state = 9 + } else { + s.state = 11 + } +} + +// states computes the states of the operation codec. +func (s *state) states(dictHead int64) (state1, state2, posState uint32) { + state1 = s.state + posState = uint32(dictHead) & s.posBitMask + state2 = (s.state << maxPosBits) | posState + return +} + +// litState computes the literal state. +func (s *state) litState(prev byte, dictHead int64) uint32 { + lp, lc := uint(s.Properties.LP), uint(s.Properties.LC) + litState := ((uint32(dictHead) & ((1 << lp) - 1)) << lc) | + (uint32(prev) >> (8 - lc)) + return litState +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go new file mode 100644 index 0000000000..36b29b5982 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -0,0 +1,133 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+// treeCodec encodes or decodes values with a fixed bit size. It uses a
+// tree of probability values. The root of the tree is the most-significant bit.
+type treeCodec struct {
+	probTree
+}
+
+// makeTreeCodec makes a tree codec. The bits value must be inside the range
+// [1,32].
+func makeTreeCodec(bits int) treeCodec {
+	return treeCodec{makeProbTree(bits)}
+}
+
+// deepcopy initializes tc as a deep copy of the source.
+func (tc *treeCodec) deepcopy(src *treeCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value.
+func (tc *treeCodec) Encode(e *rangeEncoder, v uint32) (err error) {
+	m := uint32(1)
+	for i := int(tc.bits) - 1; i >= 0; i-- {
+		b := (v >> uint(i)) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors may
+// be caused by the range decoder.
+func (tc *treeCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := 0; j < int(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+	}
+	return m - (1 << uint(tc.bits)), nil
+}
+
+// treeReverseCodec is another tree codec, where the least-significant bit is
+// the start of the probability tree.
+type treeReverseCodec struct {
+	probTree
+}
+
+// deepcopy initializes the treeReverseCodec as a deep copy of the
+// source.
+func (tc *treeReverseCodec) deepcopy(src *treeReverseCodec) {
+	tc.probTree.deepcopy(&src.probTree)
+}
+
+// makeTreeReverseCodec creates a treeReverseCodec value. The bits argument
+// must be in the range [1,32].
+func makeTreeReverseCodec(bits int) treeReverseCodec {
+	return treeReverseCodec{makeProbTree(bits)}
+}
+
+// Encode uses the range encoder to encode a fixed-bit-size value. The range
+// encoder may cause errors.
+func (tc *treeReverseCodec) Encode(v uint32, e *rangeEncoder) (err error) {
+	m := uint32(1)
+	for i := uint(0); i < uint(tc.bits); i++ {
+		b := (v >> i) & 1
+		if err := e.EncodeBit(b, &tc.probs[m]); err != nil {
+			return err
+		}
+		m = (m << 1) | b
+	}
+	return nil
+}
+
+// Decode uses the range decoder to decode a fixed-bit-size value. Errors
+// returned by the range decoder will be returned.
+func (tc *treeReverseCodec) Decode(d *rangeDecoder) (v uint32, err error) {
+	m := uint32(1)
+	for j := uint(0); j < uint(tc.bits); j++ {
+		b, err := d.DecodeBit(&tc.probs[m])
+		if err != nil {
+			return 0, err
+		}
+		m = (m << 1) | b
+		v |= b << j
+	}
+	return v, nil
+}
+
+// probTree stores enough probability values to be used by the treeEncode and
+// treeDecode methods of the range coder types.
+type probTree struct {
+	probs []prob
+	bits  byte
+}
+
+// deepcopy initializes the probTree value as a deep copy of the source.
+func (t *probTree) deepcopy(src *probTree) {
+	if t == src {
+		return
+	}
+	t.probs = make([]prob, len(src.probs))
+	copy(t.probs, src.probs)
+	t.bits = src.bits
+}
+
+// makeProbTree initializes a probTree structure.
+func makeProbTree(bits int) probTree {
+	if !(1 <= bits && bits <= 32) {
+		panic("bits outside of range [1,32]")
+	}
+	t := probTree{
+		bits:  byte(bits),
+		probs: make([]prob, 1<<uint(bits)),
+	}
+	for i := range t.probs {
+		t.probs[i] = probInit
+	}
+	return t
+}
+
+// Bits provides the number of bits for the values to de/encode.
+func (t *probTree) Bits() int {
+	return int(t.bits)
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lzma
+
+import (
+	"bufio"
+	"errors"
+	"io"
+)
+
+// MinDictCap and MaxDictCap provide the range of supported dictionary
+// capacities.
+const (
+	MinDictCap = 1 << 12
+	MaxDictCap = 1<<32 - 1
+)
+
+// WriterConfig defines the configuration parameters for a Writer.
+type WriterConfig struct {
+	// Properties for the encoding. If it is nil the value
+	// {LC: 3, LP: 0, PB: 2} will be chosen.
+	Properties *Properties
+	// The capacity of the dictionary. If DictCap is zero, the value
+	// 8 MiB will be chosen.
+	DictCap int
+	// Size of the lookahead buffer; value 0 indicates default size
+	// 4096
+	BufSize int
+	// Match algorithm
+	Matcher MatchAlgorithm
+	// SizeInHeader indicates that the header will contain the size.
+	SizeInHeader bool
+	// Size of the data to be encoded. A positive value will imply
+	// SizeInHeader.
+	Size int64
+	// EOSMarker requests whether the EOS marker will be written. If
+	// no explicit size is given the EOS marker will be set
+	// automatically.
+	EOSMarker bool
+}
+
+// fill converts zero-value fields to their explicit default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.Size > 0 {
+		c.SizeInHeader = true
+	}
+	if !c.SizeInHeader {
+		c.EOSMarker = true
+	}
+}
+
+// Verify checks WriterConfig for errors.
Verify will replace zero +// values with default values. +func (c *WriterConfig) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.SizeInHeader { + if c.Size < 0 { + return errors.New("lzma: negative size not supported") + } + } else if !c.EOSMarker { + return errors.New("lzma: EOS marker is required") + } + if err = c.Matcher.verify(); err != nil { + return err + } + + return nil +} + +// header returns the header structure for this configuration. +func (c *WriterConfig) header() header { + h := header{ + properties: *c.Properties, + dictCap: c.DictCap, + size: -1, + } + if c.SizeInHeader { + h.size = c.Size + } + return h +} + +// Writer writes an LZMA stream in the classic format. +type Writer struct { + h header + bw io.ByteWriter + buf *bufio.Writer + e *encoder +} + +// NewWriter creates a new LZMA writer for the classic format. The +// method will write the header to the underlying stream. +func (c WriterConfig) NewWriter(lzma io.Writer) (w *Writer, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer{h: c.header()} + + var ok bool + w.bw, ok = lzma.(io.ByteWriter) + if !ok { + w.buf = bufio.NewWriter(lzma) + w.bw = w.buf + } + state := newState(w.h.properties) + m, err := c.Matcher.new(w.h.dictCap) + if err != nil { + return nil, err + } + dict, err := newEncoderDict(w.h.dictCap, c.BufSize, m) + if err != nil { + return nil, err + } + var flags encoderFlags + if c.EOSMarker { + flags = eosMarker + } + if w.e, err = newEncoder(w.bw, state, dict, flags); err != nil { + return nil, err + } + + if err = w.writeHeader(); err != nil { + return nil, err + } + return w, nil +} + +// NewWriter creates a new LZMA writer using the classic format. The +// function writes the header to the underlying stream. +func NewWriter(lzma io.Writer) (w *Writer, err error) { + return WriterConfig{}.NewWriter(lzma) +} + +// writeHeader writes the LZMA header into the stream. +func (w *Writer) writeHeader() error { + data, err := w.h.marshalBinary() + if err != nil { + return err + } + _, err = w.bw.(io.Writer).Write(data) + return err +} + +// Write puts data into the Writer. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.h.size >= 0 { + m := w.h.size + m -= w.e.Compressed() + int64(w.e.dict.Buffered()) + if m < 0 { + m = 0 + } + if m < int64(len(p)) { + p = p[:m] + err = ErrNoSpace + } + } + var werr error + if n, werr = w.e.Write(p); werr != nil { + err = werr + } + return n, err +} + +// Close closes the writer stream. It ensures that all data from the +// buffer will be compressed and the LZMA stream will be finished. 
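Editorial note: typical use of the classic Writer defined above. With neither Size nor SizeInHeader set, fill() enables the EOS marker, and the default properties {LC: 3, LP: 0, PB: 2} yield the well-known header byte 0x5d. A minimal sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/ulikunitz/xz/lzma"
)

// Compress a short string with default settings; Close finishes the
// stream (and writes the EOS marker, since no size was configured).
func main() {
	var buf bytes.Buffer
	w, err := lzma.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("the quick brown fox")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, properties byte %#x\n", buf.Len(), buf.Bytes()[0])
}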
+func (w *Writer) Close() error { + if w.h.size >= 0 { + n := w.e.Compressed() + int64(w.e.dict.Buffered()) + if n != w.h.size { + return errSize + } + } + err := w.e.Close() + if w.buf != nil { + ferr := w.buf.Flush() + if err == nil { + err = ferr + } + } + return err +} diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go new file mode 100644 index 0000000000..97bbafa116 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -0,0 +1,305 @@ +// Copyright 2014-2022 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lzma + +import ( + "bytes" + "errors" + "io" +) + +// Writer2Config is used to create a Writer2 using parameters. +type Writer2Config struct { + // The properties for the encoding. If the it is nil the value + // {LC: 3, LP: 0, PB: 2} will be chosen. + Properties *Properties + // The capacity of the dictionary. If DictCap is zero, the value + // 8 MiB will be chosen. + DictCap int + // Size of the lookahead buffer; value 0 indicates default size + // 4096 + BufSize int + // Match algorithm + Matcher MatchAlgorithm +} + +// fill replaces zero values with default values. +func (c *Writer2Config) fill() { + if c.Properties == nil { + c.Properties = &Properties{LC: 3, LP: 0, PB: 2} + } + if c.DictCap == 0 { + c.DictCap = 8 * 1024 * 1024 + } + if c.BufSize == 0 { + c.BufSize = 4096 + } +} + +// Verify checks the Writer2Config for correctness. Zero values will be +// replaced by default values. +func (c *Writer2Config) Verify() error { + c.fill() + var err error + if c == nil { + return errors.New("lzma: WriterConfig is nil") + } + if c.Properties == nil { + return errors.New("lzma: WriterConfig has no Properties set") + } + if err = c.Properties.verify(); err != nil { + return err + } + if !(MinDictCap <= c.DictCap && int64(c.DictCap) <= MaxDictCap) { + return errors.New("lzma: dictionary capacity is out of range") + } + if !(maxMatchLen <= c.BufSize) { + return errors.New("lzma: lookahead buffer size too small") + } + if c.Properties.LC+c.Properties.LP > 4 { + return errors.New("lzma: sum of lc and lp exceeds 4") + } + if err = c.Matcher.verify(); err != nil { + return err + } + return nil +} + +// Writer2 supports the creation of an LZMA2 stream. But note that +// written data is buffered, so call Flush or Close to write data to the +// underlying writer. The Close method writes the end-of-stream marker +// to the stream. So you may be able to concatenate the output of two +// writers as long the output of the first writer has only been flushed +// but not closed. +// +// Any change to the fields Properties, DictCap must be done before the +// first call to Write, Flush or Close. +type Writer2 struct { + w io.Writer + + start *state + encoder *encoder + + cstate chunkState + ctype chunkType + + buf bytes.Buffer + lbw LimitedByteWriter +} + +// NewWriter2 creates an LZMA2 chunk sequence writer with the default +// parameters and options. +func NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + return Writer2Config{}.NewWriter2(lzma2) +} + +// NewWriter2 creates a new LZMA2 writer using the given configuration. 
+func (c Writer2Config) NewWriter2(lzma2 io.Writer) (w *Writer2, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + w = &Writer2{ + w: lzma2, + start: newState(*c.Properties), + cstate: start, + ctype: start.defaultChunkType(), + } + w.buf.Grow(maxCompressed) + w.lbw = LimitedByteWriter{BW: &w.buf, N: maxCompressed} + m, err := c.Matcher.new(c.DictCap) + if err != nil { + return nil, err + } + d, err := newEncoderDict(c.DictCap, c.BufSize, m) + if err != nil { + return nil, err + } + w.encoder, err = newEncoder(&w.lbw, cloneState(w.start), d, 0) + if err != nil { + return nil, err + } + return w, nil +} + +// written returns the number of bytes written to the current chunk +func (w *Writer2) written() int { + if w.encoder == nil { + return 0 + } + return int(w.encoder.Compressed()) + w.encoder.dict.Buffered() +} + +// errClosed indicates that the writer is closed. +var errClosed = errors.New("lzma: writer closed") + +// Writes data to LZMA2 stream. Note that written data will be buffered. +// Use Flush or Close to ensure that data is written to the underlying +// writer. +func (w *Writer2) Write(p []byte) (n int, err error) { + if w.cstate == stop { + return 0, errClosed + } + for n < len(p) { + m := maxUncompressed - w.written() + if m <= 0 { + panic("lzma: maxUncompressed reached") + } + var q []byte + if n+m < len(p) { + q = p[n : n+m] + } else { + q = p[n:] + } + k, err := w.encoder.Write(q) + n += k + if err != nil && err != ErrLimit { + return n, err + } + if err == ErrLimit || k == m { + if err = w.flushChunk(); err != nil { + return n, err + } + } + } + return n, nil +} + +// writeUncompressedChunk writes an uncompressed chunk to the LZMA2 +// stream. +func (w *Writer2) writeUncompressedChunk() error { + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("lzma: can't write empty uncompressed chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + switch w.ctype { + case cLRND: + w.ctype = cUD + default: + w.ctype = cU + } + w.encoder.state = w.start + + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = w.encoder.dict.CopyN(w.w, int(u)) + return err +} + +// writeCompressedChunk writes a compressed chunk to the underlying +// writer. +func (w *Writer2) writeCompressedChunk() error { + if w.ctype == cU || w.ctype == cUD { + panic("chunk type uncompressed") + } + + u := w.encoder.Compressed() + if u <= 0 { + return errors.New("writeCompressedChunk: empty chunk") + } + if u > maxUncompressed { + panic("overrun of uncompressed data limit") + } + c := w.buf.Len() + if c <= 0 { + panic("no compressed data") + } + if c > maxCompressed { + panic("overrun of compressed data limit") + } + header := chunkHeader{ + ctype: w.ctype, + uncompressed: uint32(u - 1), + compressed: uint16(c - 1), + props: w.encoder.state.Properties, + } + hdata, err := header.MarshalBinary() + if err != nil { + return err + } + if _, err = w.w.Write(hdata); err != nil { + return err + } + _, err = io.Copy(w.w, &w.buf) + return err +} + +// writes a single chunk to the underlying writer. +func (w *Writer2) writeChunk() error { + u := int(uncompressedHeaderLen + w.encoder.Compressed()) + c := headerLen(w.ctype) + w.buf.Len() + if u < c { + return w.writeUncompressedChunk() + } + return w.writeCompressedChunk() +} + +// flushChunk terminates the current chunk. 
+// to support the next chunk.
+func (w *Writer2) flushChunk() error {
+	if w.written() == 0 {
+		return nil
+	}
+	var err error
+	if err = w.encoder.Close(); err != nil {
+		return err
+	}
+	if err = w.writeChunk(); err != nil {
+		return err
+	}
+	w.buf.Reset()
+	w.lbw.N = maxCompressed
+	if err = w.encoder.Reopen(&w.lbw); err != nil {
+		return err
+	}
+	if err = w.cstate.next(w.ctype); err != nil {
+		return err
+	}
+	w.ctype = w.cstate.defaultChunkType()
+	w.start = cloneState(w.encoder.state)
+	return nil
+}
+
+// Flush writes all buffered data out to the underlying stream. This
+// could result in multiple chunks being created.
+func (w *Writer2) Flush() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	for w.written() > 0 {
+		if err := w.flushChunk(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Close terminates the LZMA2 stream with an EOS chunk.
+func (w *Writer2) Close() error {
+	if w.cstate == stop {
+		return errClosed
+	}
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	// write zero byte EOS chunk
+	_, err := w.w.Write([]byte{0})
+	if err != nil {
+		return err
+	}
+	w.cstate = stop
+	return nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go
new file mode 100644
index 0000000000..bd5f42ee82
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go
@@ -0,0 +1,117 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// LZMA filter constants.
+const (
+	lzmaFilterID  = 0x21
+	lzmaFilterLen = 3
+)
+
+// lzmaFilter declares the LZMA2 filter information stored in an xz
+// block header.
+type lzmaFilter struct {
+	dictCap int64
+}
+
+// String returns a representation of the LZMA filter.
+func (f lzmaFilter) String() string {
+	return fmt.Sprintf("LZMA dict cap %#x", f.dictCap)
+}
+
+// id returns the ID for the LZMA2 filter.
+func (f lzmaFilter) id() uint64 { return lzmaFilterID }
+
+// MarshalBinary converts the lzmaFilter into its encoded
+// representation.
+func (f lzmaFilter) MarshalBinary() (data []byte, err error) {
+	c := lzma.EncodeDictCap(f.dictCap)
+	return []byte{lzmaFilterID, 1, c}, nil
+}
+
+// UnmarshalBinary unmarshals the given data representation of the LZMA2
+// filter.
+func (f *lzmaFilter) UnmarshalBinary(data []byte) error {
+	if len(data) != lzmaFilterLen {
+		return errors.New("xz: data for LZMA2 filter has wrong length")
+	}
+	if data[0] != lzmaFilterID {
+		return errors.New("xz: wrong LZMA2 filter id")
+	}
+	if data[1] != 1 {
+		return errors.New("xz: wrong LZMA2 filter size")
+	}
+	dc, err := lzma.DecodeDictCap(data[2])
+	if err != nil {
+		return errors.New("xz: wrong LZMA2 dictionary size property")
+	}
+
+	f.dictCap = dc
+	return nil
+}
+
+// reader creates a new reader for the LZMA2 filter.
+func (f lzmaFilter) reader(r io.Reader, c *ReaderConfig) (fr io.Reader,
+	err error) {
+
+	config := new(lzma.Reader2Config)
+	if c != nil {
+		config.DictCap = c.DictCap
+	}
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fr, err = config.NewReader2(r)
+	if err != nil {
+		return nil, err
+	}
+	return fr, nil
+}
+
+// writeCloser creates an io.WriteCloser for the LZMA2 filter.
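+// The dictionary capacity declared in the block header takes
+// precedence over the configured value whenever it is larger, so the
+// writer never uses a smaller dictionary than the header announces.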
+func (f lzmaFilter) writeCloser(w io.WriteCloser, c *WriterConfig,
+) (fw io.WriteCloser, err error) {
+	config := new(lzma.Writer2Config)
+	if c != nil {
+		*config = lzma.Writer2Config{
+			Properties: c.Properties,
+			DictCap:    c.DictCap,
+			BufSize:    c.BufSize,
+			Matcher:    c.Matcher,
+		}
+	}
+
+	dc := int(f.dictCap)
+	if dc < 1 {
+		return nil, errors.New("xz: LZMA2 filter parameter " +
+			"dictionary capacity overflow")
+	}
+	if dc > config.DictCap {
+		config.DictCap = dc
+	}
+
+	fw, err = config.NewWriter2(w)
+	if err != nil {
+		return nil, err
+	}
+	return fw, nil
+}
+
+// last returns true, because an LZMA2 filter must be the last filter in
+// the filter list.
+func (f lzmaFilter) last() bool { return true }
diff --git a/vendor/github.com/ulikunitz/xz/make-docs b/vendor/github.com/ulikunitz/xz/make-docs
new file mode 100644
index 0000000000..a8c612ce17
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/make-docs
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -x
+pandoc -t html5 -f markdown -s --css=doc/md.css -o README.html README.md
+pandoc -t html5 -f markdown -s --css=doc/md.css -o TODO.html TODO.md
diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go
new file mode 100644
index 0000000000..6a56a26128
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/none-check.go
@@ -0,0 +1,23 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import "hash"
+
+type noneHash struct{}
+
+func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil }
+
+func (h noneHash) Sum(b []byte) []byte { return b }
+
+func (h noneHash) Reset() {}
+
+func (h noneHash) Size() int { return 0 }
+
+func (h noneHash) BlockSize() int { return 0 }
+
+func newNoneHash() hash.Hash {
+	return &noneHash{}
+}
diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go
new file mode 100644
index 0000000000..bde1412cfe
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/reader.go
@@ -0,0 +1,359 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xz supports the compression and decompression of xz files. It
+// supports version 1.0.4 of the specification without the non-LZMA2
+// filters. See http://tukaani.org/xz/xz-file-format-1.0.4.txt
+package xz
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/internal/xlog"
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// ReaderConfig defines the parameters for the xz reader. The
+// SingleStream parameter requests the reader to assume that the
+// underlying stream contains only a single stream.
+type ReaderConfig struct {
+	DictCap      int
+	SingleStream bool
+}
+
+// Verify checks the reader parameters for validity. Zero values will
+// be replaced by default values.
+func (c *ReaderConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: reader parameters are nil")
+	}
+	lc := lzma.Reader2Config{DictCap: c.DictCap}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Reader supports the reading of one or multiple xz streams.
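+//
+// A minimal decompression sketch (src and dst are placeholder
+// io.Reader/io.Writer values; error handling elided):
+//
+//	r, err := NewReader(src)
+//	if err == nil {
+//		_, err = io.Copy(dst, r)
+//	}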
+type Reader struct { + ReaderConfig + + xz io.Reader + sr *streamReader +} + +// streamReader decodes a single xz stream +type streamReader struct { + ReaderConfig + + xz io.Reader + br *blockReader + newHash func() hash.Hash + h header + index []record +} + +// NewReader creates a new xz reader using the default parameters. +// The function reads and checks the header of the first XZ stream. The +// reader will process multiple streams including padding. +func NewReader(xz io.Reader) (r *Reader, err error) { + return ReaderConfig{}.NewReader(xz) +} + +// NewReader creates an xz stream reader. The created reader will be +// able to process multiple streams and padding unless a SingleStream +// has been set in the reader configuration c. +func (c ReaderConfig) NewReader(xz io.Reader) (r *Reader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + r = &Reader{ + ReaderConfig: c, + xz: xz, + } + if r.sr, err = c.newStreamReader(xz); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return r, nil +} + +var errUnexpectedData = errors.New("xz: unexpected data after stream") + +// Read reads uncompressed data from the stream. +func (r *Reader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.sr == nil { + if r.SingleStream { + data := make([]byte, 1) + _, err = io.ReadFull(r.xz, data) + if err != io.EOF { + return n, errUnexpectedData + } + return n, io.EOF + } + for { + r.sr, err = r.ReaderConfig.newStreamReader(r.xz) + if err != errPadding { + break + } + } + if err != nil { + return n, err + } + } + k, err := r.sr.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.sr = nil + continue + } + return n, err + } + } + return n, nil +} + +var errPadding = errors.New("xz: padding (4 zero bytes) encountered") + +// newStreamReader creates a new xz stream reader using the given configuration +// parameters. NewReader reads and checks the header of the xz stream. +func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error) { + if err = c.Verify(); err != nil { + return nil, err + } + data := make([]byte, HeaderLen) + if _, err := io.ReadFull(xz, data[:4]); err != nil { + return nil, err + } + if bytes.Equal(data[:4], []byte{0, 0, 0, 0}) { + return nil, errPadding + } + if _, err = io.ReadFull(xz, data[4:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + r = &streamReader{ + ReaderConfig: c, + xz: xz, + index: make([]record, 0, 4), + } + if err = r.h.UnmarshalBinary(data); err != nil { + return nil, err + } + xlog.Debugf("xz header %s", r.h) + if r.newHash, err = newHashFunc(r.h.flags); err != nil { + return nil, err + } + return r, nil +} + +// readTail reads the index body and the xz footer. 
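+// It cross-checks the records collected while reading the blocks
+// against the stream index, and validates the footer flags and the
+// index size.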
+func (r *streamReader) readTail() error { + index, n, err := readIndexBody(r.xz, len(r.index)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + + for i, rec := range r.index { + if rec != index[i] { + return fmt.Errorf("xz: record %d is %v; want %v", + i, rec, index[i]) + } + } + + p := make([]byte, footerLen) + if _, err = io.ReadFull(r.xz, p); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + var f footer + if err = f.UnmarshalBinary(p); err != nil { + return err + } + xlog.Debugf("xz footer %s", f) + if f.flags != r.h.flags { + return errors.New("xz: footer flags incorrect") + } + if f.indexSize != int64(n)+1 { + return errors.New("xz: index size in footer wrong") + } + return nil +} + +// Read reads actual data from the xz stream. +func (r *streamReader) Read(p []byte) (n int, err error) { + for n < len(p) { + if r.br == nil { + bh, hlen, err := readBlockHeader(r.xz) + if err != nil { + if err == errIndexIndicator { + if err = r.readTail(); err != nil { + return n, err + } + return n, io.EOF + } + return n, err + } + xlog.Debugf("block %v", *bh) + r.br, err = r.ReaderConfig.newBlockReader(r.xz, bh, + hlen, r.newHash()) + if err != nil { + return n, err + } + } + k, err := r.br.Read(p[n:]) + n += k + if err != nil { + if err == io.EOF { + r.index = append(r.index, r.br.record()) + r.br = nil + } else { + return n, err + } + } + } + return n, nil +} + +// countingReader is a reader that counts the bytes read. +type countingReader struct { + r io.Reader + n int64 +} + +// Read reads data from the wrapped reader and adds it to the n field. +func (lr *countingReader) Read(p []byte) (n int, err error) { + n, err = lr.r.Read(p) + lr.n += int64(n) + return n, err +} + +// blockReader supports the reading of a block. +type blockReader struct { + lxz countingReader + header *blockHeader + headerLen int + n int64 + hash hash.Hash + r io.Reader +} + +// newBlockReader creates a new block reader. +func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, + hlen int, hash hash.Hash) (br *blockReader, err error) { + + br = &blockReader{ + lxz: countingReader{r: xz}, + header: h, + headerLen: hlen, + hash: hash, + } + + fr, err := c.newFilterReader(&br.lxz, h.filters) + if err != nil { + return nil, err + } + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } + + return br, nil +} + +// uncompressedSize returns the uncompressed size of the block. +func (br *blockReader) uncompressedSize() int64 { + return br.n +} + +// compressedSize returns the compressed size of the block. +func (br *blockReader) compressedSize() int64 { + return br.lxz.n +} + +// unpaddedSize computes the unpadded size for the block. +func (br *blockReader) unpaddedSize() int64 { + n := int64(br.headerLen) + n += br.compressedSize() + n += int64(br.hash.Size()) + return n +} + +// record returns the index record for the current block. +func (br *blockReader) record() record { + return record{br.unpaddedSize(), br.uncompressedSize()} +} + +// Read reads data from the block. 
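+// On reaching the end of the block it consumes the block padding and
+// verifies the stored check sum against the computed one.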
+func (br *blockReader) Read(p []byte) (n int, err error) {
+	n, err = br.r.Read(p)
+	br.n += int64(n)
+
+	u := br.header.uncompressedSize
+	if u >= 0 && br.uncompressedSize() > u {
+		return n, errors.New("xz: wrong uncompressed size for block")
+	}
+	c := br.header.compressedSize
+	if c >= 0 && br.compressedSize() > c {
+		return n, errors.New("xz: wrong compressed size for block")
+	}
+	if err != io.EOF {
+		return n, err
+	}
+	if br.uncompressedSize() < u || br.compressedSize() < c {
+		return n, io.ErrUnexpectedEOF
+	}
+
+	s := br.hash.Size()
+	k := padLen(br.lxz.n)
+	q := make([]byte, k+s, k+2*s)
+	if _, err = io.ReadFull(br.lxz.r, q); err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return n, err
+	}
+	if !allZeros(q[:k]) {
+		return n, errors.New("xz: non-zero block padding")
+	}
+	checkSum := q[k:]
+	computedSum := br.hash.Sum(checkSum[s:])
+	if !bytes.Equal(checkSum, computedSum) {
+		return n, errors.New("xz: checksum error for block")
+	}
+	return n, io.EOF
+}
+
+func (c *ReaderConfig) newFilterReader(r io.Reader, f []filter) (fr io.Reader,
+	err error) {
+
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+
+	fr = r
+	for i := len(f) - 1; i >= 0; i-- {
+		fr, err = f[i].reader(fr, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fr, nil
+}
diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go
new file mode 100644
index 0000000000..f693e0aef7
--- /dev/null
+++ b/vendor/github.com/ulikunitz/xz/writer.go
@@ -0,0 +1,399 @@
+// Copyright 2014-2022 Ulrich Kunitz. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xz
+
+import (
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/ulikunitz/xz/lzma"
+)
+
+// WriterConfig describes the parameters for an xz writer.
+type WriterConfig struct {
+	Properties *lzma.Properties
+	DictCap    int
+	BufSize    int
+	BlockSize  int64
+	// checksum method: CRC32, CRC64 or SHA256 (default: CRC64)
+	CheckSum byte
+	// Forces NoChecksum (default: false)
+	NoCheckSum bool
+	// match algorithm
+	Matcher lzma.MatchAlgorithm
+}
+
+// fill replaces zero values with default values.
+func (c *WriterConfig) fill() {
+	if c.Properties == nil {
+		c.Properties = &lzma.Properties{LC: 3, LP: 0, PB: 2}
+	}
+	if c.DictCap == 0 {
+		c.DictCap = 8 * 1024 * 1024
+	}
+	if c.BufSize == 0 {
+		c.BufSize = 4096
+	}
+	if c.BlockSize == 0 {
+		c.BlockSize = maxInt64
+	}
+	if c.CheckSum == 0 {
+		c.CheckSum = CRC64
+	}
+	if c.NoCheckSum {
+		c.CheckSum = None
+	}
+}
+
+// Verify checks the configuration for errors. Zero values will be
+// replaced by default values.
+func (c *WriterConfig) Verify() error {
+	if c == nil {
+		return errors.New("xz: writer configuration is nil")
+	}
+	c.fill()
+	lc := lzma.Writer2Config{
+		Properties: c.Properties,
+		DictCap:    c.DictCap,
+		BufSize:    c.BufSize,
+		Matcher:    c.Matcher,
+	}
+	if err := lc.Verify(); err != nil {
+		return err
+	}
+	if c.BlockSize <= 0 {
+		return errors.New("xz: block size out of range")
+	}
+	if err := verifyFlags(c.CheckSum); err != nil {
+		return err
+	}
+	return nil
+}
+
+// filters creates the filter list for the given parameters.
+func (c *WriterConfig) filters() []filter {
+	return []filter{&lzmaFilter{int64(c.DictCap)}}
+}
+
+// maxInt64 defines the maximum 64-bit signed integer.
+const maxInt64 = 1<<63 - 1
+
+// verifyFilters checks that the filter list has the correct length and
+// the right sequence of filters.
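+// The xz format allows at most four filters, and only the final filter
+// in the chain may be one that must come last (LZMA2 here).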
+func verifyFilters(f []filter) error {
+	if len(f) == 0 {
+		return errors.New("xz: no filters")
+	}
+	if len(f) > 4 {
+		return errors.New("xz: more than four filters")
+	}
+	for _, g := range f[:len(f)-1] {
+		if g.last() {
+			return errors.New("xz: last filter is not last")
+		}
+	}
+	if !f[len(f)-1].last() {
+		return errors.New("xz: wrong last filter")
+	}
+	return nil
+}
+
+// newFilterWriteCloser converts a filter list into a WriteCloser that
+// can be used by a blockWriter.
+func (c *WriterConfig) newFilterWriteCloser(w io.Writer, f []filter) (fw io.WriteCloser, err error) {
+	if err = verifyFilters(f); err != nil {
+		return nil, err
+	}
+	fw = nopWriteCloser(w)
+	for i := len(f) - 1; i >= 0; i-- {
+		fw, err = f[i].writeCloser(fw, c)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return fw, nil
+}
+
+// nopWCloser implements a WriteCloser whose Close method does nothing.
+type nopWCloser struct {
+	io.Writer
+}
+
+// Close returns nil and doesn't do anything else.
+func (c nopWCloser) Close() error {
+	return nil
+}
+
+// nopWriteCloser converts the Writer into a WriteCloser with a Close
+// function that does nothing besides returning nil.
+func nopWriteCloser(w io.Writer) io.WriteCloser {
+	return nopWCloser{w}
+}
+
+// Writer compresses data written to it. It is an io.WriteCloser.
+type Writer struct {
+	WriterConfig
+
+	xz      io.Writer
+	bw      *blockWriter
+	newHash func() hash.Hash
+	h       header
+	index   []record
+	closed  bool
+}
+
+// newBlockWriter creates a new block writer and writes the header out.
+func (w *Writer) newBlockWriter() error {
+	var err error
+	w.bw, err = w.WriterConfig.newBlockWriter(w.xz, w.newHash())
+	if err != nil {
+		return err
+	}
+	if err = w.bw.writeHeader(w.xz); err != nil {
+		return err
+	}
+	return nil
+}
+
+// closeBlockWriter closes a block writer and records the sizes in the
+// index.
+func (w *Writer) closeBlockWriter() error {
+	var err error
+	if err = w.bw.Close(); err != nil {
+		return err
+	}
+	w.index = append(w.index, w.bw.record())
+	return nil
+}
+
+// NewWriter creates a new xz writer using default parameters.
+func NewWriter(xz io.Writer) (w *Writer, err error) {
+	return WriterConfig{}.NewWriter(xz)
+}
+
+// NewWriter creates a new Writer using the given configuration parameters.
+func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
+	if err = c.Verify(); err != nil {
+		return nil, err
+	}
+	w = &Writer{
+		WriterConfig: c,
+		xz:           xz,
+		h:            header{c.CheckSum},
+		index:        make([]record, 0, 4),
+	}
+	if w.newHash, err = newHashFunc(c.CheckSum); err != nil {
+		return nil, err
+	}
+	data, err := w.h.MarshalBinary()
+	if err != nil {
+		return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err)
+	}
+	if _, err = xz.Write(data); err != nil {
+		return nil, err
+	}
+	if err = w.newBlockWriter(); err != nil {
+		return nil, err
+	}
+	return w, nil
+}
+
+// Write compresses the uncompressed data provided.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.closed {
+		return 0, errClosed
+	}
+	for {
+		k, err := w.bw.Write(p[n:])
+		n += k
+		if err != errNoSpace {
+			return n, err
+		}
+		if err = w.closeBlockWriter(); err != nil {
+			return n, err
+		}
+		if err = w.newBlockWriter(); err != nil {
+			return n, err
+		}
+	}
+}
+
+// Close closes the writer and adds the footer to the Writer. Close
+// doesn't close the underlying writer.
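+//
+// A minimal compression sketch (dst and data are placeholder
+// variables; error handling elided):
+//
+//	w, err := NewWriter(dst)
+//	_, err = w.Write(data)
+//	err = w.Close() // writes the index and the footer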
+func (w *Writer) Close() error {
+	if w.closed {
+		return errClosed
+	}
+	w.closed = true
+	var err error
+	if err = w.closeBlockWriter(); err != nil {
+		return err
+	}
+
+	f := footer{flags: w.h.flags}
+	if f.indexSize, err = writeIndex(w.xz, w.index); err != nil {
+		return err
+	}
+	data, err := f.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.xz.Write(data); err != nil {
+		return err
+	}
+	return nil
+}
+
+// countingWriter is a writer that counts all data written to it.
+type countingWriter struct {
+	w io.Writer
+	n int64
+}
+
+// Write writes data to the countingWriter.
+func (cw *countingWriter) Write(p []byte) (n int, err error) {
+	n, err = cw.w.Write(p)
+	cw.n += int64(n)
+	if err == nil && cw.n < 0 {
+		return n, errors.New("xz: counter overflow")
+	}
+	return
+}
+
+// blockWriter writes a single block.
+type blockWriter struct {
+	cxz countingWriter
+	// mw combines io.WriteCloser w and the hash.
+	mw        io.Writer
+	w         io.WriteCloser
+	n         int64
+	blockSize int64
+	closed    bool
+	headerLen int
+
+	filters []filter
+	hash    hash.Hash
+}
+
+// newBlockWriter creates a new block writer.
+func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWriter, err error) {
+	bw = &blockWriter{
+		cxz:       countingWriter{w: xz},
+		blockSize: c.BlockSize,
+		filters:   c.filters(),
+		hash:      hash,
+	}
+	bw.w, err = c.newFilterWriteCloser(&bw.cxz, bw.filters)
+	if err != nil {
+		return nil, err
+	}
+	if bw.hash.Size() != 0 {
+		bw.mw = io.MultiWriter(bw.w, bw.hash)
+	} else {
+		bw.mw = bw.w
+	}
+	return bw, nil
+}
+
+// writeHeader writes the header. If the function is called after Close
+// the compressedSize and uncompressedSize fields will be filled.
+func (bw *blockWriter) writeHeader(w io.Writer) error {
+	h := blockHeader{
+		compressedSize:   -1,
+		uncompressedSize: -1,
+		filters:          bw.filters,
+	}
+	if bw.closed {
+		h.compressedSize = bw.compressedSize()
+		h.uncompressedSize = bw.uncompressedSize()
+	}
+	data, err := h.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(data); err != nil {
+		return err
+	}
+	bw.headerLen = len(data)
+	return nil
+}
+
+// compressedSize returns the amount of data written to the underlying
+// stream.
+func (bw *blockWriter) compressedSize() int64 {
+	return bw.cxz.n
+}
+
+// uncompressedSize returns the number of bytes written to the
+// blockWriter.
+func (bw *blockWriter) uncompressedSize() int64 {
+	return bw.n
+}
+
+// unpaddedSize returns the sum of the header length, the compressed
+// size of the block and the hash size.
+func (bw *blockWriter) unpaddedSize() int64 {
+	if bw.headerLen <= 0 {
+		panic("xz: block header not written")
+	}
+	n := int64(bw.headerLen)
+	n += bw.compressedSize()
+	n += int64(bw.hash.Size())
+	return n
+}
+
+// record returns the record for the current block. Call Close before
+// calling this method.
+func (bw *blockWriter) record() record {
+	return record{bw.unpaddedSize(), bw.uncompressedSize()}
+}
+
+var errClosed = errors.New("xz: writer already closed")
+
+var errNoSpace = errors.New("xz: no space")
+
+// Write writes uncompressed data to the block writer.
+func (bw *blockWriter) Write(p []byte) (n int, err error) {
+	if bw.closed {
+		return 0, errClosed
+	}
+
+	t := bw.blockSize - bw.n
+	if int64(len(p)) > t {
+		err = errNoSpace
+		p = p[:t]
+	}
+
+	var werr error
+	n, werr = bw.mw.Write(p)
+	bw.n += int64(n)
+	if werr != nil {
+		return n, werr
+	}
+	return n, err
+}
+
+// Close closes the writer.
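+// Closing flushes the filter chain, pads the block to a multiple of
+// four bytes and appends the check sum.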
+func (bw *blockWriter) Close() error { + if bw.closed { + return errClosed + } + bw.closed = true + if err := bw.w.Close(); err != nil { + return err + } + s := bw.hash.Size() + k := padLen(bw.cxz.n) + p := make([]byte, k+s) + bw.hash.Sum(p[k:k]) + if _, err := bw.cxz.w.Write(p); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/xanzy/ssh-agent/.gitignore b/vendor/github.com/xanzy/ssh-agent/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/test/performance/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/xanzy/ssh-agent/LICENSE similarity index 84% rename from test/performance/vendor/github.com/pelletier/go-toml/LICENSE rename to vendor/github.com/xanzy/ssh-agent/LICENSE index f414553c21..8f71f43fee 100644 --- a/test/performance/vendor/github.com/pelletier/go-toml/LICENSE +++ b/vendor/github.com/xanzy/ssh-agent/LICENSE @@ -1,49 +1,3 @@ -The bulk of github.com/pelletier/go-toml is distributed under the MIT license -(see below), with the exception of localtime.go and localtime.test.go. -Those two files have been copied over from Google's civil library at revision -ed46f5086358513cf8c25f8e3f022cb838a49d66, and are distributed under the Apache -2.0 license (see below). - - -github.com/pelletier/go-toml: - - -The MIT License (MIT) - -Copyright (c) 2013 - 2021 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -localtime.go, localtime_test.go: - -Originals: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil.go - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/civil/civil_test.go -Changes: - * Renamed files from civil* to localtime*. - * Package changed from civil to toml. - * 'Local' prefix added to all structs. -License: - https://raw.githubusercontent.com/googleapis/google-cloud-go/ed46f5086358513cf8c25f8e3f022cb838a49d66/LICENSE - - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -224,7 +178,7 @@ License: APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -232,7 +186,7 @@ License: same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -245,3 +199,4 @@ License: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + diff --git a/vendor/github.com/xanzy/ssh-agent/README.md b/vendor/github.com/xanzy/ssh-agent/README.md new file mode 100644 index 0000000000..e2dfcedca9 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/README.md @@ -0,0 +1,23 @@ +# ssh-agent + +Create a new [agent.Agent](https://godoc.org/golang.org/x/crypto/ssh/agent#Agent) on any type of OS (so including Windows) from any [Go](https://golang.org) application. + +## Limitations + +When compiled for Windows, it will only support [Pageant](http://the.earth.li/~sgtatham/putty/0.66/htmldoc/Chapter9.html#pageant) as the SSH authentication agent. + +## Credits + +Big thanks to [Давид Мзареулян (David Mzareulyan)](https://github.com/davidmz) for creating the [go-pageant](https://github.com/davidmz/go-pageant) package! + +## Issues + +If you have an issue: report it on the [issue tracker](https://github.com/xanzy/ssh-agent/issues) + +## Author + +Sander van Harmelen () + +## License + +The files `pageant_windows.go` and `sshagent_windows.go` have their own license (see file headers). The rest of this package is licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/vendor/github.com/xanzy/ssh-agent/pageant_windows.go b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go new file mode 100644 index 0000000000..21d3cba2a5 --- /dev/null +++ b/vendor/github.com/xanzy/ssh-agent/pageant_windows.go @@ -0,0 +1,143 @@ +// +// Copyright (c) 2014 David Mzareulyan +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software +// and associated documentation files (the "Software"), to deal in the Software without restriction, +// including without limitation the rights to use, copy, modify, merge, publish, distribute, +// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software +// is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial +// portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING +// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+//
+
+// +build windows
+
+package sshagent
+
+// see https://github.com/Yasushi/putty/blob/master/windows/winpgntc.c#L155
+// see https://github.com/paramiko/paramiko/blob/master/paramiko/win_pageant.py
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// MaxMessageLen is the maximum size of a message that can be sent to
+// Pageant.
+const MaxMessageLen = 8192
+
+var (
+	ErrPageantNotFound = errors.New("pageant process not found")
+	ErrSendMessage     = errors.New("error sending message")
+
+	ErrMessageTooLong       = errors.New("message too long")
+	ErrInvalidMessageFormat = errors.New("invalid message format")
+	ErrResponseTooLong      = errors.New("response too long")
+)
+
+const (
+	agentCopydataID = 0x804e50ba
+	wmCopydata      = 74
+)
+
+type copyData struct {
+	dwData uintptr
+	cbData uint32
+	lpData unsafe.Pointer
+}
+
+var (
+	lock sync.Mutex
+
+	winFindWindow         = winAPI("user32.dll", "FindWindowW")
+	winGetCurrentThreadID = winAPI("kernel32.dll", "GetCurrentThreadId")
+	winSendMessage        = winAPI("user32.dll", "SendMessageW")
+)
+
+func winAPI(dllName, funcName string) func(...uintptr) (uintptr, uintptr, error) {
+	proc := syscall.MustLoadDLL(dllName).MustFindProc(funcName)
+	return func(a ...uintptr) (uintptr, uintptr, error) { return proc.Call(a...) }
+}
+
+// query sends the message msg to Pageant and returns the response or
+// an error. msg is a raw agent request with a length prefix; the
+// response is a raw agent response with a length prefix.
+func query(msg []byte) ([]byte, error) {
+	if len(msg) > MaxMessageLen {
+		return nil, ErrMessageTooLong
+	}
+
+	msgLen := binary.BigEndian.Uint32(msg[:4])
+	if len(msg) != int(msgLen)+4 {
+		return nil, ErrInvalidMessageFormat
+	}
+
+	lock.Lock()
+	defer lock.Unlock()
+
+	paWin := pageantWindow()
+
+	if paWin == 0 {
+		return nil, ErrPageantNotFound
+	}
+
+	thID, _, _ := winGetCurrentThreadID()
+	mapName := fmt.Sprintf("PageantRequest%08x", thID)
+	pMapName, _ := syscall.UTF16PtrFromString(mapName)
+
+	mmap, err := syscall.CreateFileMapping(syscall.InvalidHandle, nil, syscall.PAGE_READWRITE, 0, MaxMessageLen+4, pMapName)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.CloseHandle(mmap)
+
+	ptr, err := syscall.MapViewOfFile(mmap, syscall.FILE_MAP_WRITE, 0, 0, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.UnmapViewOfFile(ptr)
+
+	mmSlice := (*(*[MaxMessageLen]byte)(unsafe.Pointer(ptr)))[:]
+
+	copy(mmSlice, msg)
+
+	mapNameBytesZ := append([]byte(mapName), 0)
+
+	cds := copyData{
+		dwData: agentCopydataID,
+		cbData: uint32(len(mapNameBytesZ)),
+		lpData: unsafe.Pointer(&(mapNameBytesZ[0])),
+	}
+
+	resp, _, _ := winSendMessage(paWin, wmCopydata, 0, uintptr(unsafe.Pointer(&cds)))
+
+	if resp == 0 {
+		return nil, ErrSendMessage
+	}
+
+	respLen := binary.BigEndian.Uint32(mmSlice[:4])
+	if respLen > MaxMessageLen-4 {
+		return nil, ErrResponseTooLong
+	}
+
+	respData := make([]byte, respLen+4)
+	copy(respData, mmSlice)
+
+	return respData, nil
+}
+
+func pageantWindow() uintptr {
+	nameP, _ := syscall.UTF16PtrFromString("Pageant")
+	h, _, _ := winFindWindow(uintptr(unsafe.Pointer(nameP)), uintptr(unsafe.Pointer(nameP)))
+	return h
+}
diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent.go b/vendor/github.com/xanzy/ssh-agent/sshagent.go
new file mode 100644
index 0000000000..259fea2b63
--- /dev/null
+++ b/vendor/github.com/xanzy/ssh-agent/sshagent.go
@@ -0,0 +1,49 @@
+//
+// Copyright 2015, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// +build !windows
+
+package sshagent
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"os"
+
+	"golang.org/x/crypto/ssh/agent"
+)
+
+// New returns a new agent.Agent that uses a unix socket.
+func New() (agent.Agent, net.Conn, error) {
+	if !Available() {
+		return nil, nil, errors.New("SSH agent requested but SSH_AUTH_SOCK not specified")
+	}
+
+	sshAuthSock := os.Getenv("SSH_AUTH_SOCK")
+
+	conn, err := net.Dial("unix", sshAuthSock)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Error connecting to SSH_AUTH_SOCK: %v", err)
+	}
+
+	return agent.NewClient(conn), conn, nil
+}
+
+// Available returns true if an auth socket is defined.
+func Available() bool {
+	return os.Getenv("SSH_AUTH_SOCK") != ""
+}
diff --git a/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go
new file mode 100644
index 0000000000..ca77e6a965
--- /dev/null
+++ b/vendor/github.com/xanzy/ssh-agent/sshagent_windows.go
@@ -0,0 +1,103 @@
+//
+// Copyright (c) 2014 David Mzareulyan
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+// and associated documentation files (the "Software"), to deal in the Software without restriction,
+// including without limitation the rights to use, copy, modify, merge, publish, distribute,
+// sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
+// is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial
+// portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+// BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+//
+
+// +build windows
+
+package sshagent
+
+import (
+	"errors"
+	"io"
+	"net"
+	"sync"
+
+	"github.com/Microsoft/go-winio"
+	"golang.org/x/crypto/ssh/agent"
+)
+
+const (
+	sshAgentPipe = `\\.\pipe\openssh-ssh-agent`
+)
+
+// Available returns true if Pageant is running
+func Available() bool {
+	if pageantWindow() != 0 {
+		return true
+	}
+	conn, err := winio.DialPipe(sshAgentPipe, nil)
+	if err != nil {
+		return false
+	}
+	conn.Close()
+	return true
+}
+
+// New returns a new agent.Agent and the (custom) connection it uses
+// to communicate with a running pageant.exe instance (see README.md)
+func New() (agent.Agent, net.Conn, error) {
+	if pageantWindow() != 0 {
+		return agent.NewClient(&conn{}), nil, nil
+	}
+	conn, err := winio.DialPipe(sshAgentPipe, nil)
+	if err != nil {
+		return nil, nil, errors.New(
+			"SSH agent requested, but could not detect Pageant or Windows native SSH agent",
+		)
+	}
+	return agent.NewClient(conn), nil, nil
+}
+
+type conn struct {
+	sync.Mutex
+	buf []byte
+}
+
+func (c *conn) Close() {
+	c.Lock()
+	defer c.Unlock()
+	c.buf = nil
+}
+
+func (c *conn) Write(p []byte) (int, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	resp, err := query(p)
+	if err != nil {
+		return 0, err
+	}
+
+	c.buf = append(c.buf, resp...)
+
+	return len(p), nil
+}
+
+func (c *conn) Read(p []byte) (int, error) {
+	c.Lock()
+	defer c.Unlock()
+
+	if len(c.buf) == 0 {
+		return 0, io.EOF
+	}
+
+	n := copy(p, c.buf)
+	c.buf = c.buf[n:]
+
+	return n, nil
+}
diff --git a/vendor/gitlab.com/digitalxero/go-conventional-commit/.gitignore b/vendor/gitlab.com/digitalxero/go-conventional-commit/.gitignore
new file mode 100644
index 0000000000..7dd3ef89c1
--- /dev/null
+++ b/vendor/gitlab.com/digitalxero/go-conventional-commit/.gitignore
@@ -0,0 +1,21 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+#junk
+*.swp
+.idea/
+*.orig
+*_bckp
+.DS_Store
+
+/build/
\ No newline at end of file
diff --git a/vendor/gitlab.com/digitalxero/go-conventional-commit/LICENSE b/vendor/gitlab.com/digitalxero/go-conventional-commit/LICENSE
new file mode 100644
index 0000000000..a41ea67f62
--- /dev/null
+++ b/vendor/gitlab.com/digitalxero/go-conventional-commit/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2019 Dj Gilcrease
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
+modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file diff --git a/vendor/gitlab.com/digitalxero/go-conventional-commit/Makefile b/vendor/gitlab.com/digitalxero/go-conventional-commit/Makefile new file mode 100644 index 0000000000..ff06d7db92 --- /dev/null +++ b/vendor/gitlab.com/digitalxero/go-conventional-commit/Makefile @@ -0,0 +1,48 @@ +.PHONY: all githooks clean deps test lint out_dir + +OUT_DIR ?= build +GOARCH ?= amd64 +GOOS ?= linux +ifeq ($(GOOS),windows) + BIN_EXTENSION=.exe +endif +SEMVER ?= 0.0.0-$(shell whoami) + +all: deps test lint build + +init: githooks + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(shell go env GOPATH)/bin + +githooks: + git config core.hooksPath .githooks + +out_dir: + mkdir -p $(OUT_DIR) + +test: out_dir + go test -count=1 -race ./... -tags 'release netgo osusergo' -v -coverprofile=$(OUT_DIR)/coverage.out + go tool cover -func=$(OUT_DIR)/coverage.out + +# +# lint go code critical checks +# +# Shortcut for lint for developer +lint: out_dir + $(if $(CI), make ci_lint, make lint_local) + +lint_local: + golangci-lint run --fix -v + +# Running lint with additional checks for sonarscanner +# Used by CI +ci_lint: + golangci-lint run --out-format checkstyle > $(OUT_DIR)/lint-report.xml + +format: + go fmt ./... + +deps: + go get -u ./... + go mod tidy + go mod verify + diff --git a/vendor/gitlab.com/digitalxero/go-conventional-commit/README.md b/vendor/gitlab.com/digitalxero/go-conventional-commit/README.md new file mode 100644 index 0000000000..db0b81d216 --- /dev/null +++ b/vendor/gitlab.com/digitalxero/go-conventional-commit/README.md @@ -0,0 +1,3 @@ +# go-conventional-commit + +golang parser for conventional-commit messages https://www.conventionalcommits.org/ diff --git a/vendor/gitlab.com/digitalxero/go-conventional-commit/conventional_commits.go b/vendor/gitlab.com/digitalxero/go-conventional-commit/conventional_commits.go new file mode 100644 index 0000000000..ce5e6b6192 --- /dev/null +++ b/vendor/gitlab.com/digitalxero/go-conventional-commit/conventional_commits.go @@ -0,0 +1,112 @@ +package conventional_commit + +import ( + "fmt" + "regexp" + "strings" +) + +// nolint:gochecknoglobals +var baseFormatRegex = regexp.MustCompile(`(?is)^(?:(?P[^\(!:]+)(?:\((?P[^\)]+)\))?(?P!)?: (?P[^\n\r]+))(?P.*)`) +var bodyFooterFormatRegex = regexp.MustCompile(`(?isU)^(?:(?P.*))?(?P